diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..f8295d9
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+  - 1.11.x
+  - 1.12.x
+  - 1.13.x
+  - 1.14.x
diff --git a/README.md b/README.md
index a055a31..a83dfd4 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,107 @@
-# spidomtr
\ No newline at end of file
+# spidomtr
+[![Build Status](https://travis-ci.org/spider-pigs/spidomtr.svg?branch=master)](https://travis-ci.org/spider-pigs/spidomtr) [![Go Report Card](https://goreportcard.com/badge/github.com/spider-pigs/spidomtr)](https://goreportcard.com/report/github.com/spider-pigs/spidomtr) [![GoDoc](https://godoc.org/github.com/spider-pigs/spidomtr?status.svg)](https://godoc.org/github.com/spider-pigs/spidomtr)
+
+spidomtr is a Go library for benchmarking and load testing.
+
+```console
+ .__ .___ __
+ ____________ |__| __| _/____ ______/ |________
+ / ___/\____ \| |/ __ |/ _ \ / \ __\_ __ \
+ \___ \ | |_> > / /_/ ( <_> ) Y Y \ | | | \/
+/____ >| __/|__\____ |\____/|__|_| /__| |__|
+ \/ |__| \/ \/
+
+[====================================================================] 100% 3s
+
+Summary:
+ Count: 500
+ Total: 3.391898304s
+ Slowest: 104 ms
+ Fastest: 21 ms
+ Average: 61 ms
+ Req/sec: 147.41
+
+Response time histogram:
+ 21 ms [1] |
+ 37 ms [94] |∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
+ 54 ms [120] |∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
+ 70 ms [95] |∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
+ 87 ms [104] |∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
+ 104 ms [86] |∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎∎
+
+Latency distribution:
+ 10% in 30 ms
+ 25% in 42 ms
+ 50% in 59 ms
+ 75% in 81 ms
+ 90% in 93 ms
+ 95% in 97 ms
+ 99% in 101 ms
+
+Responses:
+ OK: 500
+ Errored: 0
+ Skipped: 0
+
+Tests:
+ √ awesome_test
+ Count: 500
+ OK: 500
+ Errored: 0
+ Skipped: 0
+ Slowest: 104 ms
+ Fastest: 21 ms
+ Average: 61 ms
+ 90%: 93 ms
+ 95%: 97 ms
+ 99%: 101 ms
+ Req/sec: 147.41
+```
+
+# Install
+```golang
+import "github.com/spider-pigs/spidomtr"
+```
+
+# Usage
+```golang
+package main
+
+import (
+	"context"
+	"time"
+
+	"github.com/spider-pigs/spidomtr"
+	"github.com/spider-pigs/spidomtr/pkg/handlers"
+	"github.com/spider-pigs/spidomtr/pkg/testunit"
+)
+
+func main() {
+	// Create the test runner
+	runner := spidomtr.NewRunner(
+		spidomtr.ID("awesome tests"),
+		spidomtr.Description("just running some awesome tests"),
+		spidomtr.Handlers(
+			handlers.ProgressBar(), // Displays progress bar during test run
+		),
+		spidomtr.Iterations(50),          // Run the test 50 times
+		spidomtr.Timeout(time.Second*20), // Set a test timeout
+		spidomtr.Users(10),               // Simulate 10 concurrent users
+	)
+
+	// Create a test
+	test := testunit.New(
+		testunit.Test(func(ctx context.Context, args []interface{}) ([]interface{}, error) {
+			if err := doSomethingCool(ctx); err != nil {
+				// Test failed
+				return args, err
+			}
+			// Test passed
+			return args, nil
+		}),
+	)
+
+	// Run the test
+	res := runner.Run(context.Background(), test)
+	...
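+	// res.Stats aggregates the whole run, e.g. res.Stats.Passed,
+	// res.Stats.Errors and res.Stats.RPS (see the Stats type in spidomtr.go)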
+} +``` diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..aca1a09 --- /dev/null +++ b/go.mod @@ -0,0 +1,8 @@ +module github.com/spider-pigs/spidomtr + +require ( + github.com/cheggaaa/pb/v3 v3.0.4 + github.com/google/uuid v1.1.1 + github.com/stretchr/testify v1.5.1 + github.com/thepatrik/strcolor v1.0.3 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..f4b2f1c --- /dev/null +++ b/go.sum @@ -0,0 +1,32 @@ +github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM= +github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/cheggaaa/pb/v3 v3.0.4 h1:QZEPYOj2ix6d5oEg63fbHmpolrnNiwjUsk+h74Yt4bM= +github.com/cheggaaa/pb/v3 v3.0.4/go.mod h1:7rgWxLrAUcFMkvJuv09+DYi7mMUYi8nO9iOWcvGJPfw= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/thepatrik/strcolor v1.0.3 h1:lFuGZwKJn1CwbXYagm8jVKmLh9FPCrd3T7FGSm4jEjc= +github.com/thepatrik/strcolor v1.0.3/go.mod h1:I519L4XnoZZmMJauibTGgy7XODjJ1st3IusYns7MOrg= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/internal/runner/runner.go b/internal/runner/runner.go new file mode 100644 index 0000000..61b3a22 --- /dev/null +++ b/internal/runner/runner.go @@ -0,0 +1,97 @@ +package runner + +import ( + "context" + "errors" + "fmt" 
+ "sync" + "time" + + "github.com/spider-pigs/spidomtr/pkg/testunit" +) + +// TestUnitDone type +type TestUnitDone func(testunit.TestUnit, *Timer, error) + +// Runner type +type Runner struct { + Iterations int + TestUnitDone TestUnitDone + Timeout time.Duration +} + +// New constructs a new +func New(timeout time.Duration, iterations int) *Runner { + return &Runner{ + Iterations: iterations, + Timeout: timeout, + } +} + +// Run runs test units. +func (runner Runner) Run(ctx context.Context, tests ...testunit.TestUnit) *Timer { + totalTimer := NewTimer() + totalTimer.Begin() + + for i := 0; i < runner.Iterations; i++ { + for _, t := range tests { + var err error + timer := NewTimer() + + enabled, _ := t.Enabled() + if enabled { + timer, err = runTestUnit(ctx, t, runner.Timeout) + } + if runner.TestUnitDone != nil { + runner.TestUnitDone(t, timer, err) + } + } + } + + totalTimer.Finish() + return totalTimer +} + +func runTestUnit(ctx context.Context, t testunit.TestUnit, timeout time.Duration) (*Timer, error) { + timer := NewTimer() + var err error + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() + defer func() { + if r := recover(); r != nil { + panicstr := fmt.Sprintf("%s", r) + err = errors.New("func panic: " + panicstr) + } + }() + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + // Run prepare func + var args []interface{} + args, err = t.Prepare(ctx) + if err != nil { + return + } + + // Run main func + timer.Begin() + args, err = t.Test(ctx, args) + timer.Finish() + + if err != nil { + return + } + + // Run cleanup func + err = t.Cleanup(ctx, args) + }() + wg.Wait() + if err != nil { + return NewTimer(), err + } + + return timer, nil +} diff --git a/internal/runner/timer.go b/internal/runner/timer.go new file mode 100644 index 0000000..3b8fe22 --- /dev/null +++ b/internal/runner/timer.go @@ -0,0 +1,26 @@ +package runner + +import "time" + +// Timer type +type Timer struct { + Start time.Time + End time.Time + Duration time.Duration +} + +// NewTimer creates timer +func NewTimer() *Timer { + return &Timer{} +} + +// Begin starts timer +func (timer *Timer) Begin() { + timer.Start = time.Now() +} + +// Finish stops timer +func (timer *Timer) Finish() { + timer.End = time.Now() + timer.Duration = timer.End.Sub(timer.Start) +} diff --git a/pkg/handlers/logger.go b/pkg/handlers/logger.go new file mode 100644 index 0000000..9495ca0 --- /dev/null +++ b/pkg/handlers/logger.go @@ -0,0 +1,75 @@ +package handlers + +import ( + "bytes" + "fmt" + "log" + "os" + "strings" + "unicode/utf8" + + "github.com/spider-pigs/spidomtr" + "github.com/spider-pigs/spidomtr/pkg/testunit" + "github.com/thepatrik/strcolor" +) + +const ( + checkMark = "√" + crossMark = "☓" + skipMark = "-" +) + +// TestLogger type +type TestLogger struct { + Log *log.Logger + Buffer *bytes.Buffer +} + +// Logger logs tests during the test run +func Logger() spidomtr.RunnerHandler { + return &TestLogger{} +} + +// RunnerStarted is called when runner is started (prior to any tests +// have been run). +func (logger *TestLogger) RunnerStarted(id, description string, count int) { + logger.Log = log.New(os.Stdout, "", 0) + logger.Buffer = &bytes.Buffer{} + + fmt.Fprintf(logger.Buffer, "Running %s: %s...\n", id, description) + logger.Log.Printf("Running %s: %s\n", id, description) +} + +// TestDone is called when a test has been completed. 
+func (logger *TestLogger) TestDone(res spidomtr.TestResult) {
+	switch res.Outcome {
+	case testunit.Skip:
+		fmt.Fprintf(logger.Buffer, "%s %s: %s\n", skipMark, res.ID, res.Comment)
+		logger.Log.Printf("%s %s: %s\n", strcolor.Yellow(skipMark), res.ID, res.Comment)
+	case testunit.Fail:
+		fmt.Fprintf(logger.Buffer, "%s %s: %s\n", crossMark, res.ID, res.Error)
+		logger.Log.Printf("%s %s: %s\n", strcolor.Red(crossMark), res.ID, res.Error)
+	case testunit.Pass:
+		fmt.Fprintf(logger.Buffer, "%s %s: %s\n", checkMark, res.ID, res.Duration)
+		logger.Log.Printf("%s %s: %s\n", strcolor.Green(checkMark), res.ID, res.Duration)
+	}
+}
+
+// RunnerDone is called when the runner has run all tests.
+func (logger *TestLogger) RunnerDone(res spidomtr.Result) {
+	mark := func() strcolor.StrColor {
+		switch {
+		case res.Stats.Errors > 0:
+			return strcolor.Red(crossMark)
+		case res.Stats.Skips > 0:
+			return strcolor.Yellow(skipMark)
+		}
+		return strcolor.Green(checkMark)
+	}()
+
+	str := fmt.Sprintf("%d/%d passed, %d failed, %d skipped, took %s, avg %s/test", res.Stats.Passed, res.Stats.Count, res.Stats.Errors, res.Stats.Skips, res.Stats.Duration, res.Stats.Average)
+	divider := strings.Repeat("-", utf8.RuneCountInString(str)+2)
+
+	fmt.Fprintf(logger.Buffer, "%s\n%s %s\n%s\n", divider, mark.Val, str, divider)
+	logger.Log.Printf("%s\n%s %s\n%s\n", divider, mark, str, divider)
+}
diff --git a/pkg/handlers/progress.go b/pkg/handlers/progress.go
new file mode 100644
index 0000000..0b0b7e2
--- /dev/null
+++ b/pkg/handlers/progress.go
@@ -0,0 +1,32 @@
+package handlers
+
+import (
+	"github.com/cheggaaa/pb/v3"
+	"github.com/spider-pigs/spidomtr"
+)
+
+type progressBar struct {
+	bar *pb.ProgressBar
+}
+
+// ProgressBar is a runner handler that displays a running progress
+// bar.
+func ProgressBar() spidomtr.RunnerHandler {
+	return &progressBar{}
+}
+
+// RunnerStarted is called when the runner starts, before any tests
+// have been run.
+func (b *progressBar) RunnerStarted(id, description string, count int) {
+	b.bar = pb.StartNew(count)
+}
+
+// TestDone is called when a test has been completed.
+func (b *progressBar) TestDone(spidomtr.TestResult) {
+	b.bar.Increment()
+}
+
+// RunnerDone is called when the runner has run all tests.
+func (b *progressBar) RunnerDone(spidomtr.Result) {
+	b.bar.Finish()
+}
diff --git a/pkg/testunit/testoutcome.go b/pkg/testunit/testoutcome.go
new file mode 100644
index 0000000..bc0ec55
--- /dev/null
+++ b/pkg/testunit/testoutcome.go
@@ -0,0 +1,25 @@
+package testunit
+
+// TestOutcome type
+type TestOutcome int
+
+// Fail is a failed test
+const Fail TestOutcome = 0
+
+// Skip is a skipped test
+const Skip TestOutcome = 1
+
+// Pass is a passed test
+const Pass TestOutcome = 2
+
+func (x TestOutcome) String() string {
+	switch x {
+	case Fail:
+		return "fail"
+	case Skip:
+		return "skip"
+	case Pass:
+		return "pass"
+	}
+	panic("unknown type")
+}
diff --git a/pkg/testunit/testunit.go b/pkg/testunit/testunit.go
new file mode 100644
index 0000000..fba519f
--- /dev/null
+++ b/pkg/testunit/testunit.go
@@ -0,0 +1,114 @@
+package testunit
+
+import (
+	"context"
+
+	"github.com/google/uuid"
+)
+
+// TestUnit type
+type TestUnit interface {
+	// ID returns identifier
+	ID() string
+	// Enabled reports whether the test should run and, if not, why
+	Enabled() (bool, string)
+	// Prepare runs before Test(context.Context, []interface{})
+	Prepare(context.Context) ([]interface{}, error)
+	// Test is the main run func
+	Test(context.Context, []interface{}) ([]interface{}, error)
+	// Cleanup runs after Test(context.Context, []interface{})
+	Cleanup(context.Context, []interface{}) error
+}
+
+// Config type
+type Config struct {
+	Enabled func() (bool, string)
+	ID      string
+	Prepare func(context.Context) ([]interface{}, error)
+	Test    func(context.Context, []interface{}) ([]interface{}, error)
+	Cleanup func(context.Context, []interface{}) error
+}
+
+// Option type
+type Option func(*Config)
+
+// Enabled adds enabled func
+func Enabled(f func() (bool, string)) Option {
+	return func(cfg *Config) {
+		cfg.Enabled = f
+	}
+}
+
+// ID adds an id
+func ID(id string) Option {
+	return func(cfg *Config) {
+		cfg.ID = id
+	}
+}
+
+// Prepare adds a prepare func
+func Prepare(f func(context.Context) ([]interface{}, error)) Option {
+	return func(cfg *Config) {
+		cfg.Prepare = f
+	}
+}
+
+// Test adds a test func
+func Test(f func(context.Context, []interface{}) ([]interface{}, error)) Option {
+	return func(cfg *Config) {
+		cfg.Test = f
+	}
+}
+
+// Cleanup adds a cleanup func
+func Cleanup(f func(context.Context, []interface{}) error) Option {
+	return func(cfg *Config) {
+		cfg.Cleanup = f
+	}
+}
+
+// TestAssembly type
+type TestAssembly struct {
+	cfg *Config
+}
+
+// New constructs a new test
+func New(options ...Option) *TestAssembly {
+	cfg := &Config{
+		ID:      uuid.New().String(),
+		Enabled: func() (bool, string) { return true, "" },
+		Prepare: func(context.Context) ([]interface{}, error) { return nil, nil },
+		Test:    func(context.Context, []interface{}) ([]interface{}, error) { return nil, nil },
+		Cleanup: func(context.Context, []interface{}) error { return nil },
+	}
+
+	for _, opt := range options {
+		opt(cfg)
+	}
+	return &TestAssembly{cfg: cfg}
+}
+
+// ID returns identifier
+func (test *TestAssembly) ID() string {
+	return test.cfg.ID
+}
+
+// Enabled reports whether the test is enabled and, if not, why
+func (test *TestAssembly) Enabled() (bool, string) {
+	return test.cfg.Enabled()
+}
+
+// Prepare runs before Test(context.Context, []interface{})
+func (test *TestAssembly) Prepare(ctx context.Context) ([]interface{}, error) {
+	return test.cfg.Prepare(ctx)
+}
+
+// Test is the main test func
+func (test *TestAssembly) Test(ctx context.Context, args []interface{}) ([]interface{}, error) {
+	return test.cfg.Test(ctx, args)
+}
+
+// Cleanup runs after Test(context.Context, []interface{})
+func (test *TestAssembly) Cleanup(ctx context.Context, args []interface{}) error {
+	return test.cfg.Cleanup(ctx, args)
+}
diff --git a/spidomtr.go b/spidomtr.go
new file mode 100644
index 0000000..5693fa4
--- /dev/null
+++ b/spidomtr.go
@@ -0,0 +1,572 @@
+package spidomtr
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/spider-pigs/spidomtr/internal/runner"
+	"github.com/spider-pigs/spidomtr/pkg/testunit"
+)
+
+const asciilogo = `
+ .__ .___ __
+ ____________ |__| __| _/____ ______/ |________
+ / ___/\____ \| |/ __ |/ _ \ / \ __\_ __ \
+ \___ \ | |_> > / /_/ ( <_> ) Y Y \ | | | \/
+/____ >| __/|__\____ |\____/|__|_| /__| |__|
+ \/ |__| \/ \/ `
+
+// DefaultHistogramBuckets is the default number of buckets
+const DefaultHistogramBuckets = 40
+
+// DefaultPercentiles are the default percentiles
+var DefaultPercentiles = []int{10, 25, 50, 75, 90, 95, 99}
+
+// Result type
+type Result struct {
+	ChildResults []Result
+	Date         time.Time
+	Stats        Stats
+	TestStats    map[string]TestStats
+}
+
+// TestResult type
+type TestResult struct {
+	Comment  string
+	Date     time.Time
+	Duration time.Duration
+	End      time.Time
+	Error    error
+	ID       string
+	Outcome  testunit.TestOutcome
+	Start    time.Time
+}
+
+// Stats type
+type Stats struct {
+	Average       time.Duration
+	Count         int
+	Description   string
+	Distributions []LatencyDist
+	Duration      time.Duration
+	Durations     []time.Duration
+	End           time.Time
+	Errorm        map[string]int
+	Errors        int
+	Fastest       time.Duration
+	Histogram     []Bucket
+	Passed        int
+	RPS           float64
+	Skips         int
+	Slowest       time.Duration
+	Start         time.Time
+}
+
+// TestStats type
+type TestStats struct {
+	TestResults []TestResult
+	Stats       Stats
+}
+
+// Config type
+type Config struct {
+	Description      string
+	ID               string
+	Iterations       int
+	Handlers         []RunnerHandler
+	HistogramBuckets int
+	Percentiles      []int
+	ShowLogo         bool
+	ShowSummary      bool
+	Timeout          time.Duration
+	Users            int
+}
+
+// Option type
+type Option func(*Config)
+
+// Description sets a description
+func Description(description string) Option {
+	return func(cfg *Config) {
+		cfg.Description = description
+	}
+}
+
+// Handlers sets runner handlers
+func Handlers(h ...RunnerHandler) Option {
+	return func(cfg *Config) {
+		cfg.Handlers = h
+	}
+}
+
+// HistogramBuckets sets histogram resolution (defaults to 40 buckets)
+func HistogramBuckets(buckets int) Option {
+	return func(cfg *Config) {
+		cfg.HistogramBuckets = buckets
+	}
+}
+
+// ID sets ID
+func ID(s string) Option {
+	return func(cfg *Config) {
+		cfg.ID = s
+	}
+}
+
+// Iterations sets the number of iterations
+func Iterations(i int) Option {
+	return func(cfg *Config) {
+		cfg.Iterations = i
+	}
+}
+
+// ShowLogo sets whether the logo is displayed (defaults to true)
+func ShowLogo(b bool) Option {
+	return func(cfg *Config) {
+		cfg.ShowLogo = b
+	}
+}
+
+// ShowSummary sets whether the summary is displayed (defaults to true)
+func ShowSummary(b bool) Option {
+	return func(cfg *Config) {
+		cfg.ShowSummary = b
+	}
+}
+
+// Percentiles sets percentiles for latency distributions
+func Percentiles(p []int) Option {
+	return func(cfg *Config) {
+		cfg.Percentiles = p
+	}
+}
+
+// Timeout sets the test timeout (defaults to 10 secs)
+func Timeout(t time.Duration) Option {
+	return func(cfg *Config) {
+		cfg.Timeout = t
+	}
+}
+
+// Users sets the number of users
+func Users(users int) Option {
+	return func(cfg *Config) {
+		cfg.Users = users
+	}
+}
+
+// RunnerHandler interface
+type RunnerHandler interface {
+	// RunnerStarted is called when the runner starts, before any
+	// tests have been run.
+	RunnerStarted(id, description string, testUnits int)
+	// TestDone is called when a test has been completed.
+	TestDone(res TestResult)
+	// RunnerDone is called when the runner has run all tests.
+	RunnerDone(res Result)
+}
+
+// Runner type
+type Runner struct {
+	cfg *Config
+}
+
+// NewRunner constructs a new runner
+func NewRunner(options ...Option) *Runner {
+	cfg := &Config{
+		HistogramBuckets: DefaultHistogramBuckets,
+		Iterations:       1,
+		Percentiles:      DefaultPercentiles,
+		ShowLogo:         true,
+		ShowSummary:      true,
+		Timeout:          10 * time.Second,
+		Users:            1,
+	}
+
+	for _, opt := range options {
+		opt(cfg)
+	}
+
+	return &Runner{cfg: cfg}
+}
+
+// Run runs tests
+func (r *Runner) Run(ctx context.Context, tests ...testunit.TestUnit) Result {
+	if r.cfg.ShowLogo {
+		fmt.Printf("%s\n\n", asciilogo)
+	}
+
+	count := len(tests) * r.cfg.Iterations * r.cfg.Users
+
+	for _, h := range r.cfg.Handlers {
+		h.RunnerStarted(r.cfg.ID, r.cfg.Description, count)
+	}
+
+	if r.cfg.Users == 1 {
+		res := r.run(ctx, tests...)
+		for _, h := range r.cfg.Handlers {
+			h.RunnerDone(res)
+		}
+		if r.cfg.ShowSummary {
+			showSummary(res)
+		}
+		return res
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(r.cfg.Users)
+	mux := &sync.Mutex{}
+
+	results := make([]Result, 0)
+	for i := 0; i < r.cfg.Users; i++ {
+		go func() {
+			defer wg.Done()
+			runner := NewRunner()
+			runner.cfg = r.cfg
+			res := runner.run(ctx, tests...)
+			mux.Lock()
+			defer mux.Unlock()
+			results = append(results, res)
+		}()
+	}
+	wg.Wait()
+
+	sum := JoinResults(r.cfg.HistogramBuckets, r.cfg.Percentiles, results...)
+
+	// Notify handlers
+	for _, h := range r.cfg.Handlers {
+		h.RunnerDone(sum)
+	}
+
+	if r.cfg.ShowSummary {
+		showSummary(sum)
+	}
+
+	return sum
+}
+
+// run runs the tests for a single user
+func (r *Runner) run(ctx context.Context, tests ...testunit.TestUnit) Result {
+	if hasDuplicateIDs(tests) {
+		panic("tests have duplicate ids")
+	}
+
+	durations := make([]time.Duration, 0)
+
+	var skipped, errored int
+	testRunner := runner.New(r.cfg.Timeout, r.cfg.Iterations)
+
+	errorm := make(map[string]int)
+	testStats := make(map[string]TestStats)
+	testRunner.TestUnitDone = func(t testunit.TestUnit, timer *runner.Timer, err error) {
+		enabled, description := t.Enabled()
+
+		// Set test outcome
+		outcome := func() testunit.TestOutcome {
+			if !enabled {
+				return testunit.Skip
+			} else if err != nil {
+				return testunit.Fail
+			}
+			return testunit.Pass
+		}()
+
+		// Set result
+		testResult := TestResult{
+			Date:     time.Now(),
+			Duration: timer.Duration,
+			End:      timer.End,
+			Error:    err,
+			ID:       t.ID(),
+			Outcome:  outcome,
+			Start:    timer.Start,
+		}
+
+		// Handle the test outcome
+		switch outcome {
+		case testunit.Skip:
+			skipped++
+			testResult.Comment = description
+		case testunit.Fail:
+			errored++
+			errorm[err.Error()]++
+			testResult.Comment = err.Error()
+		case testunit.Pass:
+			durations = append(durations, timer.Duration)
+		}
+
+		// Append result to previous results
+		v, ok := testStats[t.ID()]
+		if !ok {
+			testResults := make([]TestResult, 0)
+			v = TestStats{
+				TestResults: testResults,
+			}
+		}
+		v.TestResults = append(v.TestResults, testResult)
+		testStats[t.ID()] = v
+
+		// Report test result to handlers.
+		for _, h := range r.cfg.Handlers {
+			h.TestDone(testResult)
+		}
+	}
+
+	// Run the tests
+	timer := testRunner.Run(ctx, tests...)
+
+	// Gather stats
+	count := len(tests) * r.cfg.Iterations
+	passed := count - errored - skipped
+	avg := avgDuration(timer.Duration, passed)
+
+	// Sort durations in ascending order
+	sort.Slice(durations, func(i, j int) bool {
+		return durations[i] < durations[j]
+	})
+	var fastest, slowest time.Duration
+	if len(durations) > 0 {
+		fastest = durations[0]
+		slowest = durations[len(durations)-1]
+	}
+
+	distributions := distributions(r.cfg.Percentiles, durations)
+	histogram := histogram(r.cfg.HistogramBuckets, durations, slowest, fastest)
+
+	rps := float64(0)
+	if timer.Duration > 0 {
+		rps = float64(passed+errored) / timer.Duration.Seconds()
+	}
+
+	stats := Stats{
+		Average:       avg,
+		Count:         count,
+		Distributions: distributions,
+		Durations:     durations,
+		Duration:      timer.Duration,
+		Errorm:        errorm,
+		Errors:        errored,
+		Fastest:       fastest,
+		End:           timer.End,
+		Histogram:     histogram,
+		Passed:        passed,
+		RPS:           rps,
+		Skips:         skipped,
+		Slowest:       slowest,
+		Start:         timer.Start,
+	}
+
+	calcTestStats(r.cfg.HistogramBuckets, r.cfg.Percentiles, testStats)
+
+	return Result{
+		Date:      time.Now(),
+		Stats:     stats,
+		TestStats: testStats,
+	}
+}
+
+func calcTestStats(buckets int, percentiles []int, testStats map[string]TestStats) {
+	for k, v := range testStats {
+		// Create stats from individual tests
+		durations := make([]time.Duration, 0)
+		var accduration time.Duration
+		starts := make([]time.Time, 0)
+		ends := make([]time.Time, 0)
+		errorm := make(map[string]int)
+		ok, skips, err := 0, 0, 0
+		for _, r := range v.TestResults {
+			if r.Error != nil {
+				errorm[r.Error.Error()]++
+			}
+			accduration += r.Duration
+			if !r.Start.IsZero() {
+				starts = append(starts, r.Start)
+				ends = append(ends, r.End)
+			}
+			switch r.Outcome {
+			case testunit.Pass:
+				ok++
+				durations = append(durations, r.Duration)
+			case
testunit.Fail: + err++ + case testunit.Skip: + skips++ + } + } + + // Sort in starts asc + sort.Slice(starts, func(i, j int) bool { + return starts[i].Before(starts[j]) + }) + + // Sort in ends desc + sort.Slice(ends, func(i, j int) bool { + return ends[i].After(ends[j]) + }) + + // Sort in duration asc + sort.Slice(durations, func(i, j int) bool { + return durations[i] < durations[j] + }) + + var start time.Time + var end time.Time + if len(starts) > 0 { + start = starts[0] + } + if len(ends) > 0 { + end = ends[0] + } + duration := end.Sub(start) + + var fastest, slowest time.Duration + if len(durations) > 0 { + fastest = durations[0] + slowest = durations[len(durations)-1] + } + + hist := histogram(buckets, durations, slowest, fastest) + dist := distributions(percentiles, durations) + c := len(v.TestResults) + v.Stats.Average = avgDuration(accduration, ok) + v.Stats.Count = c + v.Stats.Distributions = dist + v.Stats.Duration = duration + v.Stats.Durations = durations + v.Stats.Errors = err + v.Stats.Errorm = errorm + v.Stats.Fastest = fastest + v.Stats.Histogram = hist + v.Stats.Passed = ok + v.Stats.Skips = skips + v.Stats.Slowest = slowest + + rps := float64(0) + if duration > 0 { + rps = float64(ok+err) / duration.Seconds() + } + + v.Stats.RPS = rps + testStats[k] = v + } +} + +// JoinResults joins results to a combined result +func JoinResults(histogramBuckets int, percentiles []int, results ...Result) Result { + count, ok, skips, err := 0, 0, 0, 0 + var accduration time.Duration + starts := make([]time.Time, 0) + ends := make([]time.Time, 0) + durations := make([]time.Duration, 0) + errorm := make(map[string]int) + for _, r := range results { + count += r.Stats.Count + accduration += r.Stats.Duration + err += r.Stats.Errors + for k, v := range r.Stats.Errorm { + errorm[k] += v + } + if !r.Stats.Start.IsZero() { + starts = append(starts, r.Stats.Start) + ends = append(ends, r.Stats.End) + } + skips += r.Stats.Skips + ok += r.Stats.Count - r.Stats.Skips - r.Stats.Errors + durations = append(durations, r.Stats.Durations...) + } + + // Sort in starts asc + sort.Slice(starts, func(i, j int) bool { + return starts[i].Before(starts[j]) + }) + + // Sort in ends desc + sort.Slice(ends, func(i, j int) bool { + return ends[i].After(ends[j]) + }) + + sort.Slice(durations, func(i, j int) bool { + return durations[i] < durations[j] + }) + + start := starts[0] + end := ends[0] + totalDuration := end.Sub(start) + + avg := avgDuration(accduration, ok) + dist := distributions(percentiles, durations) + + var fastest, slowest time.Duration + if len(durations) > 0 { + fastest = durations[0] + slowest = durations[len(durations)-1] + } + + hist := histogram(histogramBuckets, durations, slowest, fastest) + + testStats := func() map[string]TestStats { + testStats := make(map[string]TestStats) + for _, r := range results { + for id, s := range r.TestStats { + v, ok := testStats[id] + if ok { + v.TestResults = append(v.TestResults, s.TestResults...) 
+ testStats[id] = v + } else { + testStats[id] = s + } + } + } + return testStats + }() + + // Collect all test stats + calcTestStats(histogramBuckets, percentiles, testStats) + + rps := float64(0) + if totalDuration > 0 { + rps = float64(ok+err) / totalDuration.Seconds() + } + + stats := Stats{ + Average: avg, + Count: count, + Distributions: dist, + Duration: totalDuration, + Durations: durations, + End: end, + Errors: err, + Errorm: errorm, + Fastest: fastest, + Histogram: hist, + Passed: ok, + RPS: rps, + Skips: skips, + Slowest: slowest, + Start: start, + } + + res := Result{ + ChildResults: results, + Date: time.Now(), + Stats: stats, + TestStats: testStats, + } + + return res +} + +func hasDuplicateIDs(tests []testunit.TestUnit) bool { + m := make(map[string]int) + for _, t := range tests { + _, ok := m[t.ID()] + if ok { + return true + } + m[t.ID()]++ + } + return false +} diff --git a/spidomtr_test.go b/spidomtr_test.go new file mode 100644 index 0000000..e04c98e --- /dev/null +++ b/spidomtr_test.go @@ -0,0 +1,241 @@ +package spidomtr_test + +import ( + "context" + "errors" + "math/rand" + "testing" + "time" + + "github.com/spider-pigs/spidomtr" + "github.com/spider-pigs/spidomtr/pkg/handlers" + "github.com/spider-pigs/spidomtr/pkg/testunit" + "github.com/stretchr/testify/require" +) + +func TestRunner(t *testing.T) { + t.Run("test runner with passing tests", func(t *testing.T) { + runner := spidomtr.NewRunner( + spidomtr.Iterations(50), + spidomtr.ShowLogo(false), + spidomtr.ShowSummary(false), + ) + + test := testunit.New( + testunit.Test(func(context.Context, []interface{}) ([]interface{}, error) { + return nil, nil + }), + ) + + res := runner.Run(context.Background(), test) + require.NotNil(t, res) + require.Equal(t, 50, res.Stats.Count) + require.Equal(t, 50, res.Stats.Passed) + require.Equal(t, 0, res.Stats.Skips) + require.Equal(t, 0, res.Stats.Errors) + }) + t.Run("test runner with error tests", func(t *testing.T) { + runner := spidomtr.NewRunner( + spidomtr.Iterations(50), + spidomtr.ShowLogo(false), + spidomtr.ShowSummary(false), + ) + + test := testunit.New( + testunit.Test(func(context.Context, []interface{}) ([]interface{}, error) { + return nil, errors.New("whooops") + }), + ) + + res := runner.Run(context.Background(), test) + require.NotNil(t, res) + require.Equal(t, 50, res.Stats.Count) + require.Equal(t, 0, res.Stats.Passed) + require.Equal(t, 0, res.Stats.Skips) + require.Equal(t, 50, res.Stats.Errors) + }) + t.Run("test runner with skipped tests", func(t *testing.T) { + runner := spidomtr.NewRunner( + spidomtr.Iterations(50), + spidomtr.ShowLogo(false), + spidomtr.ShowSummary(false), + ) + + test := testunit.New( + testunit.Enabled(func() (bool, string) { + return false, "leave me alone" + }), + ) + + res := runner.Run(context.Background(), test) + require.NotNil(t, res) + require.Equal(t, 50, res.Stats.Count) + require.Equal(t, 0, res.Stats.Passed) + require.Equal(t, 50, res.Stats.Skips) + require.Equal(t, 0, res.Stats.Errors) + }) + t.Run("test runner with pass, skip and error", func(t *testing.T) { + runner := spidomtr.NewRunner( + spidomtr.Iterations(50), + spidomtr.ShowLogo(false), + spidomtr.ShowSummary(false), + ) + + test1 := testunit.New( + testunit.Test(func(context.Context, []interface{}) ([]interface{}, error) { + time.Sleep(1 * time.Millisecond) + return nil, nil + }), + ) + + test2 := testunit.New( + testunit.Test(func(context.Context, []interface{}) ([]interface{}, error) { + time.Sleep(10 * time.Millisecond) + return nil, errors.New("whooops") + 
}), + ) + + test3 := testunit.New( + testunit.Enabled(func() (bool, string) { + return false, "leave me alone" + }), + ) + + res := runner.Run(context.Background(), test1, test2, test3) + require.NotNil(t, res) + require.Equal(t, 150, res.Stats.Count) + require.Equal(t, 50, res.Stats.Passed) + require.Equal(t, 50, res.Stats.Skips) + require.Equal(t, 50, res.Stats.Errors) + }) +} + +func TestJoinResults(t *testing.T) { + test := testunit.New( + testunit.Test(func(context.Context, []interface{}) ([]interface{}, error) { + return nil, nil + }), + ) + + runner1 := spidomtr.NewRunner( + spidomtr.Iterations(50), + spidomtr.ShowLogo(false), + spidomtr.ShowSummary(false), + ) + res1 := runner1.Run(context.Background(), test) + + runner2 := spidomtr.NewRunner( + spidomtr.Iterations(50), + spidomtr.ShowLogo(false), + spidomtr.ShowSummary(false), + ) + res2 := runner2.Run(context.Background(), test) + + res := spidomtr.JoinResults(spidomtr.DefaultHistogramBuckets, spidomtr.DefaultPercentiles, res1, res2) + require.NotNil(t, res) + require.Equal(t, 100, res.Stats.Count) + require.Equal(t, 100, res.Stats.Passed) + require.Equal(t, 0, len(res.Stats.Errorm)) + require.Equal(t, 0, res.Stats.Skips) + require.Equal(t, 0, res.Stats.Errors) +} + +func TestRunnerWithUsers(t *testing.T) { + runner := spidomtr.NewRunner( + spidomtr.ID("stupid"), + spidomtr.Description("just running some stupid tests"), + spidomtr.ShowLogo(false), + spidomtr.ShowSummary(false), + spidomtr.Iterations(50), + spidomtr.Timeout(time.Second*10), + spidomtr.Users(10), + ) + + test1 := testunit.New( + testunit.ID("awesome_test"), + testunit.Test(func(context.Context, []interface{}) ([]interface{}, error) { + time.Sleep(1 * time.Millisecond) + return nil, nil + }), + ) + + test2 := testunit.New( + testunit.ID("not_always_an_awesome_test"), + testunit.Test(func(context.Context, []interface{}) ([]interface{}, error) { + time.Sleep(1 * time.Millisecond) + if coinflip() == "heads" { + return nil, errors.New("whooops") + } + return nil, nil + }), + ) + + test3 := testunit.New( + testunit.ID("skipped_test"), + testunit.Enabled(func() (bool, string) { + return false, "leave me alone" + }), + ) + + res := runner.Run(context.Background(), test1, test2, test3) + require.NotNil(t, res) + require.Equal(t, 1500, res.Stats.Count) +} + +func TestHandlers(t *testing.T) { + runner := spidomtr.NewRunner( + spidomtr.HistogramBuckets(5), + spidomtr.Description("just running some stupid tests"), + spidomtr.Handlers(handlers.ProgressBar()), + spidomtr.ID("stupid"), + spidomtr.Iterations(50), + spidomtr.ShowSummary(true), + spidomtr.Timeout(time.Second*10), + spidomtr.Users(10), + ) + + min := 10 + max := 50 + + test1 := testunit.New( + testunit.ID("awesome_test"), + testunit.Test(func(context.Context, []interface{}) ([]interface{}, error) { + latency := rand.Intn(max-min) + min + time.Sleep(time.Duration(latency) * time.Millisecond) + return nil, nil + }), + ) + + test2 := testunit.New( + testunit.ID("not_always_an_awesome_test"), + testunit.Test(func(context.Context, []interface{}) ([]interface{}, error) { + time.Sleep(20 * time.Millisecond) + if coinflip() == "heads" { + return nil, errors.New("whooops") + } + return nil, nil + }), + ) + + test3 := testunit.New( + testunit.ID("skipped_test"), + testunit.Enabled(func() (bool, string) { + return false, "leave me alone" + }), + ) + + res := runner.Run(context.Background(), test1, test2, test3) + require.NotNil(t, res) + require.Equal(t, 1500, res.Stats.Count) + require.Equal(t, 500, res.Stats.Skips) + 
require.Greater(t, res.Stats.Passed, 500) +} + +func coinflip() string { + coin := []string{ + "heads", + "tails", + } + rand.Seed(time.Now().UnixNano()) + return coin[rand.Intn(len(coin))] +} diff --git a/stats.go b/stats.go new file mode 100644 index 0000000..d861e5a --- /dev/null +++ b/stats.go @@ -0,0 +1,81 @@ +package spidomtr + +import "time" + +// Bucket type +type Bucket struct { + Count int + Frequency int + Mark time.Duration +} + +// LatencyDist type +type LatencyDist struct { + Percentage int + Latency time.Duration +} + +func avgDuration(duration time.Duration, total int) time.Duration { + if total == 0 { + return 0 + } + return duration / time.Duration(total) +} + +func distributions(percentiles []int, latencies []time.Duration) []LatencyDist { + data := make([]time.Duration, len(percentiles)) + for i, j := 0, 0; i < len(latencies) && j < len(percentiles); i++ { + current := i * 100 / len(latencies) + + if current >= percentiles[j] { + data[j] = latencies[i] + j++ + } + } + + res := make([]LatencyDist, len(percentiles)) + for i := 0; i < len(percentiles); i++ { + if data[i] > 0 { + lat := data[i] + res[i] = LatencyDist{Percentage: percentiles[i], Latency: lat} + } + } + return res +} + +func histogram(resolution int, latencies []time.Duration, slowest, fastest time.Duration) []Bucket { + bc := int64(resolution) + buckets := make([]time.Duration, bc+1) + counts := make([]int, bc+1) + bs := int64(slowest-fastest) / bc + for i := int64(0); i < bc; i++ { + buckets[i] = time.Duration(int64(fastest) + int64(bs)*i) + } + buckets[bc] = slowest + var bi int + var max int + for i := 0; i < len(latencies); { + if latencies[i] <= buckets[bi] { + i++ + counts[bi]++ + if max < counts[bi] { + max = counts[bi] + } + } else if bi < len(buckets)-1 { + bi++ + } + } + res := make([]Bucket, len(buckets)) + latencyCount := len(latencies) + if latencyCount > 0 { + for i := 0; i < len(buckets); i++ { + res[i] = Bucket{ + Mark: buckets[i], + Count: counts[i], + Frequency: counts[i] / latencyCount, + } + } + } + + return res +} diff --git a/summary.go b/summary.go new file mode 100644 index 0000000..11076e7 --- /dev/null +++ b/summary.go @@ -0,0 +1,134 @@ +package spidomtr + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + "time" +) + +const ( + barChar = "∎" + checkMark = "√" + crossMark = "☓" + skipMark = "-" +) + +func showSummary(res Result) { + // Total test summary + fmt.Print("\nSummary:\n") + fmt.Printf("%2s%-10s %d\n", "", "Count:", res.Stats.Count) + fmt.Printf("%2s%-10s %s\n", "", "Total:", res.Stats.Duration) + fmt.Printf("%2s%-10s %d ms\n", "", "Slowest:", int64(res.Stats.Slowest/time.Millisecond)) + fmt.Printf("%2s%-10s %d ms\n", "", "Fastest:", int64(res.Stats.Fastest/time.Millisecond)) + fmt.Printf("%2s%-10s %d ms\n", "", "Average:", int64(res.Stats.Average/time.Millisecond)) + fmt.Printf("%2s%-10s %4.2f\n", "", "Req/sec:", res.Stats.RPS) + + // Response time histogram + fmt.Print("\nResponse time histogram:\n") + fmt.Print(histogramStr(res.Stats.Histogram)) + + // Latency distributions + fmt.Print("\nLatency distribution:\n") + for _, d := range res.Stats.Distributions { + if d.Latency > 0 && d.Percentage > 0 { + fmt.Printf("%2s%d%% in %d ms\n", "", d.Percentage, int64(d.Latency/time.Millisecond)) + } + } + + // Responses + fmt.Print("\nResponses:\n") + fmt.Printf("%2s%-10s %d\n", "", "OK:", res.Stats.Passed) + fmt.Printf("%2s%-10s %d\n", "", "Errored:", res.Stats.Errors) + fmt.Printf("%2s%-10s %d\n", "", "Skipped:", res.Stats.Skips) + + // Print error distribution + if 
len(res.Stats.Errorm) > 0 { + keys := make([]string, 0, len(res.Stats.Errorm)) + for k := range res.Stats.Errorm { + keys = append(keys, k) + } + sort.Strings(keys) + + fmt.Print("\nError distribution:\n") + for _, err := range keys { + fmt.Printf("%2s[%v] %s\n", "", res.Stats.Errorm[err], err) + } + } + + // Print stats on each test + fmt.Print("\nTests:\n") + for k, testStats := range res.TestStats { + fmt.Print("\n") + fmt.Printf("%2s%-10s\n", "", toMark(testStats)+" "+k) + fmt.Printf("%4s%-10s %v\n", "", "Count:", testStats.Stats.Count) + fmt.Printf("%4s%-10s %v\n", "", "OK:", testStats.Stats.Passed) + fmt.Printf("%4s%-10s %v\n", "", "Errored:", testStats.Stats.Errors) + fmt.Printf("%4s%-10s %v\n", "", "Skipped:", testStats.Stats.Skips) + if testStats.Stats.Slowest > 0 { + fmt.Printf("%4s%-10s %v ms\n", "", "Slowest:", int64(testStats.Stats.Slowest/time.Millisecond)) + } + if testStats.Stats.Fastest > 0 { + fmt.Printf("%4s%-10s %v ms\n", "", "Fastest:", int64(testStats.Stats.Fastest/time.Millisecond)) + } + if testStats.Stats.Average > 0 { + fmt.Printf("%4s%-10s %v ms\n", "", "Average:", int64(testStats.Stats.Average/time.Millisecond)) + } + + for _, d := range testStats.Stats.Distributions { + if d.Percentage >= 90 { + strlatency := strconv.FormatInt(int64(d.Latency/time.Millisecond), 10) + fmt.Printf("%4s%-10s %s ms\n", "", strconv.Itoa(d.Percentage)+"%:", strlatency) + } + } + + if testStats.Stats.RPS > 0 { + fmt.Printf("%4s%-10s %4.2f\n", "", "Req/sec:", testStats.Stats.RPS) + } + + // Print error distribution + if len(testStats.Stats.Errorm) > 0 { + fmt.Printf("%4s%-10s\n", "", "Errors:") + for err, count := range testStats.Stats.Errorm { + fmt.Printf("%8s[%v] %s\n", "", count, err) + } + } + } +} + +func histogramStr(buckets []Bucket) string { + max := 0 + for _, b := range buckets { + if v := b.Count; v > max { + max = v + } + } + res := new(bytes.Buffer) + for _, b := range buckets { + // Normalize bar lengths. + var barLen int + if max > 0 { + barLen = (b.Count*40 + max/2) / max + } + if b.Count > 0 { + res.WriteString(fmt.Sprintf("%2s%4s ms %-8s%s%s\n", "", strconv.FormatInt(int64(b.Mark/time.Millisecond), 10), + "["+strconv.Itoa(b.Count)+"]", "|", strings.Repeat(barChar, barLen))) + } + } + return res.String() +} + +func toMark(stats TestStats) string { + if stats.Stats.Errors > 0 { + return crossMark + } + if stats.Stats.Skips > 0 { + return skipMark + } + if stats.Stats.Passed > 0 { + return checkMark + } + return "" +}
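The RunnerHandler interface introduced in spidomtr.go is the extension point behind both bundled handlers (ProgressBar and Logger). As a rough sketch of how a caller could plug in a custom handler alongside them, the snippet below tallies failed runs per test ID; the failCounter type, the "always_fails" test and the "boom" error are illustrative names and are not part of this change.

```golang
package main

import (
	"context"
	"fmt"

	"github.com/spider-pigs/spidomtr"
	"github.com/spider-pigs/spidomtr/pkg/testunit"
)

// failCounter is an illustrative RunnerHandler that tallies failed runs per test ID.
type failCounter struct {
	fails map[string]int
}

// RunnerStarted resets the tally before any tests have been run.
func (f *failCounter) RunnerStarted(id, description string, testUnits int) {
	f.fails = make(map[string]int)
}

// TestDone counts every failed test result.
func (f *failCounter) TestDone(res spidomtr.TestResult) {
	if res.Outcome == testunit.Fail {
		f.fails[res.ID]++
	}
}

// RunnerDone prints the tally once all tests have run.
func (f *failCounter) RunnerDone(res spidomtr.Result) {
	for id, n := range f.fails {
		fmt.Printf("%s: %d failed runs\n", id, n)
	}
}

func main() {
	// Wire the custom handler into a runner.
	runner := spidomtr.NewRunner(
		spidomtr.Handlers(&failCounter{}),
		spidomtr.Iterations(10),
		spidomtr.ShowLogo(false),
	)

	// A test that always fails, so the counter has something to report.
	test := testunit.New(
		testunit.ID("always_fails"),
		testunit.Test(func(ctx context.Context, args []interface{}) ([]interface{}, error) {
			return args, fmt.Errorf("boom")
		}),
	)

	runner.Run(context.Background(), test)
}
```

Note that when Users is greater than one, Run invokes the same handler instances from one goroutine per simulated user, so a handler that mutates shared state like the map above would need its own locking; the sketch assumes the default single user.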