From 105fece47a108f73bd18128af0403acbe8228d36 Mon Sep 17 00:00:00 2001
From: Steven Normore
Date: Mon, 14 Apr 2014 15:45:44 +0000
Subject: [PATCH] add bench sub-package

---
 Makefile                   |  4 +-
 bench.go => bench/bench.go | 39 +++++++++---------
 bench/config.go            |  7 ++++
 bench/generate.go          | 24 +++++++++++
 cmd/bolt/bench.go          | 36 ++++-------------
 tx_test.go                 | 81 ++++++++++++++++----------------
 6 files changed, 97 insertions(+), 94 deletions(-)
 rename bench.go => bench/bench.go (69%)
 create mode 100644 bench/config.go
 create mode 100644 bench/generate.go

diff --git a/Makefile b/Makefile
index 81beac3..0ed5996 100644
--- a/Makefile
+++ b/Makefile
@@ -6,7 +6,7 @@ COMMIT=`git rev-parse --short HEAD`
 GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"

 bench:
-	go test -v -test.bench=$(BENCH)
+	go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH)

 # http://cloc.sourceforge.net/
 cloc:
@@ -24,7 +24,7 @@ cpuprofile: fmt
 # go get github.com/kisielk/errcheck
 errcheck:
 	@echo "=== errcheck ==="
-	@errcheck github.com/boltdb/bolt
+	@.go/bin/errcheck github.com/boltdb/bolt

 fmt:
 	@go fmt ./...
diff --git a/bench.go b/bench/bench.go
similarity index 69%
rename from bench.go
rename to bench/bench.go
index e41c260..df584f2 100644
--- a/bench.go
+++ b/bench/bench.go
@@ -1,10 +1,12 @@
-package bolt
+package bench

 import (
     "errors"
     "fmt"
     "sync"
     "testing"
+
+    "github.com/boltdb/bolt"
 )

 const (
@@ -15,14 +17,15 @@
 )

 type Benchmark struct {
-    db               *DB
-    ReadWriteMode    string
-    TraversalPattern string
-    Parallelism      int
+    db     *bolt.DB
+    config *Config
 }

-func NewBenchmark(db *DB, readWriteMode, traversalPattern string, parallelism int) *Benchmark {
-    return &Benchmark{db, readWriteMode, traversalPattern, parallelism}
+func New(db *bolt.DB, config *Config) *Benchmark {
+    b := new(Benchmark)
+    b.db = db
+    b.config = config
+    return b
 }

 func (bm *Benchmark) Run(b *testing.B) {
@@ -47,11 +50,11 @@
     // Keep running a fixed number of parallel reads until we run out of time.
     for i := 0; i < b.N; i++ {
         var wg sync.WaitGroup
-        for j := 0; j < bm.Parallelism; j++ {
+        for j := 0; j < bm.config.Parallelism; j++ {
             wg.Add(1)
             go func() {
                 defer wg.Done()
-                if err := bm.runBuckets(b, bm.db, bucketsWithKeys); err != nil {
+                if err := bm.readBuckets(b, bm.db, bucketsWithKeys); err != nil {
                     b.Fatalf("error: %+v", err)
                 }
             }()
         }
     }
 }

 // Run benchmark(s) for each of the given buckets.
-func (bm *Benchmark) runBuckets(b *testing.B, db *DB, bucketsWithKeys map[string][]string) error {
-    return db.View(func(tx *Tx) error {
+func (bm *Benchmark) readBuckets(b *testing.B, db *bolt.DB, bucketsWithKeys map[string][]string) error {
+    return db.View(func(tx *bolt.Tx) error {
         bucketsCount := len(bucketsWithKeys)
         count := 0
         for bucket, keys := range bucketsWithKeys {
             bucket := tx.Bucket([]byte(bucket))
-            if err := bm.runKeys(b, bucket, keys); err != nil {
+            if err := bm.readKeys(b, bucket, keys); err != nil {
                 return err
             }
             count++
@@ -79,7 +82,7 @@
     })
 }

-func (bm *Benchmark) runKeys(b *testing.B, bucket *Bucket, keys []string) error {
+func (bm *Benchmark) readKeys(b *testing.B, bucket *bolt.Bucket, keys []string) error {
     c := bucket.Cursor()
     keysCount := len(keys)
     count := 0
@@ -92,11 +95,11 @@
     return nil
 }

-func buckets(db *DB) ([]string, error) {
+func buckets(db *bolt.DB) ([]string, error) {
     buckets := []string{}
-    err := db.View(func(tx *Tx) error {
+    err := db.View(func(tx *bolt.Tx) error {
         // Iterate over each bucket.
-        return tx.ForEach(func(name []byte, _ *Bucket) error {
+        return tx.ForEach(func(name []byte, _ *bolt.Bucket) error {
             buckets = append(buckets, string(name))
             return nil
         })
@@ -104,9 +107,9 @@
     return buckets, err
 }

-func keys(db *DB, bucket string) ([]string, error) {
+func keys(db *bolt.DB, bucket string) ([]string, error) {
     keys := []string{}
-    err := db.View(func(tx *Tx) error {
+    err := db.View(func(tx *bolt.Tx) error {
         // Find bucket.
         b := tx.Bucket([]byte(bucket))
         if b == nil {
diff --git a/bench/config.go b/bench/config.go
new file mode 100644
index 0000000..dea08fd
--- /dev/null
+++ b/bench/config.go
@@ -0,0 +1,7 @@
+package bench
+
+type Config struct {
+    ReadWriteMode    string
+    TraversalPattern string
+    Parallelism      int
+}
diff --git a/bench/generate.go b/bench/generate.go
new file mode 100644
index 0000000..8c5554d
--- /dev/null
+++ b/bench/generate.go
@@ -0,0 +1,24 @@
+package bench
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/boltdb/bolt"
+)
+
+// Generate and write data to specified number of buckets/items.
+func GenerateDB(db *bolt.DB, numBuckets, numItemsPerBucket int) error {
+    return db.Update(func(tx *bolt.Tx) error {
+        for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ {
+            bucketName := fmt.Sprintf("bucket%08d", bucketIndex)
+            tx.CreateBucket([]byte(bucketName))
+            bucket := tx.Bucket([]byte(bucketName))
+            for i := 0; i < numItemsPerBucket; i++ {
+                value := []byte(strings.Repeat("0", 100))
+                bucket.Put([]byte(fmt.Sprintf("key%08d", i)), value)
+            }
+        }
+        return nil
+    })
+}
diff --git a/cmd/bolt/bench.go b/cmd/bolt/bench.go
index f894a70..2b6ff8a 100644
--- a/cmd/bolt/bench.go
+++ b/cmd/bolt/bench.go
@@ -4,37 +4,15 @@
 import (
     "testing"

     "github.com/boltdb/bolt"
+    "github.com/boltdb/bolt/bench"
 )

 // Import converts an exported database dump into a new database.
-// parallelism: integer representing number of concurrent reads/writes
 // readWriteMode: 'read' or 'write'
 // traversalPattern: 'sequentrial' or 'random'
+// parallelism: integer representing number of concurrent reads/writes
 func Bench(inputPath string, readWriteMode string, traversalPattern string, parallelism int) {
-    // cursor/sequential reads
-    // random reads
-
-    // sequential writes
-    // random writes
-
-    // reading from many buckets
-    // writing to many buckets
-
-    // read from many paths
-    // writing to many paths
-
-    // bucket size/messages
-    // bucket depth
-
-    // concurrency
-
-    // chart/graph
-
-    // profile
-
-    // benchmarks for getting all keys
-
     // Open the database.
     db, err := bolt.Open(inputPath, 0600)
     if err != nil {
@@ -43,9 +21,11 @@ func Bench(inputPath string, readWriteMode string, traversalPattern string, para
     }
     defer db.Close()

-    b := bolt.NewBenchmark(db, readWriteMode, traversalPattern, parallelism)
+    b := bench.New(db, &bench.Config{
+        ReadWriteMode:    readWriteMode,
+        TraversalPattern: traversalPattern,
+        Parallelism:      parallelism,
+    })

-    result := testing.Benchmark(b.Run)
-
-    println(result)
+    println(testing.Benchmark(b.Run))
 }
diff --git a/tx_test.go b/tx_test.go
index 17498ed..61810d3 100644
--- a/tx_test.go
+++ b/tx_test.go
@@ -9,6 +9,7 @@ import (
     "strings"
     "testing"

+    "github.com/boltdb/bolt/bench"
     "github.com/stretchr/testify/assert"
 )

@@ -266,121 +267,109 @@ func TestTx_OnCommit_Rollback(t *testing.T) {
     assert.Equal(t, 0, x)
 }

-func BenchmarkReadSequential_1Buckets_1Items_1Concurrency(b *testing.B) {
+func BenchmarkReadSequential_1Concurrency_1Buckets_1Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 1, 1)
 }

-func BenchmarkReadSequential_1Buckets_10Items_1Concurrency(b *testing.B) {
+func BenchmarkReadSequential_1Concurrency_1Buckets_10Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 10, 1)
 }

-func BenchmarkReadSequential_1Buckets_100Items_1Concurrency(b *testing.B) {
+func BenchmarkReadSequential_1Concurrency_1Buckets_100Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 100, 1)
 }

-func BenchmarkReadSequential_1Buckets_1000Items_1Concurrency(b *testing.B) {
+func BenchmarkReadSequential_1Concurrency_1Buckets_1000Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 1000, 1)
 }

-func BenchmarkReadSequential_1Buckets_10000Items_1Concurrency(b *testing.B) {
+func BenchmarkReadSequential_1Concurrency_1Buckets_10000Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 10000, 1)
 }

-func BenchmarkReadSequential_1Buckets_1Items_10Concurrency(b *testing.B) {
+func BenchmarkReadSequential_10Concurrency_1Buckets_1Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 1, 10)
 }

-func BenchmarkReadSequential_1Buckets_10Items_10Concurrency(b *testing.B) {
+func BenchmarkReadSequential_10Concurrency_1Buckets_10Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 10, 10)
 }

-func BenchmarkReadSequential_1Buckets_100Items_10Concurrency(b *testing.B) {
+func BenchmarkReadSequential_10Concurrency_1Buckets_100Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 100, 10)
 }

-func BenchmarkReadSequential_1Buckets_1000Items_10Concurrency(b *testing.B) {
+func BenchmarkReadSequential_10Concurrency_1Buckets_1000Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 1000, 10)
 }

-func BenchmarkReadSequential_1Buckets_10000Items_10Concurrency(b *testing.B) {
+func BenchmarkReadSequential_10Concurrency_1Buckets_10000Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 10000, 10)
 }

-func BenchmarkReadSequential_1Buckets_1Items_100Concurrency(b *testing.B) {
+func BenchmarkReadSequential_100Concurrency_1Buckets_1Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 1, 100)
 }

-func BenchmarkReadSequential_1Buckets_10Items_100Concurrency(b *testing.B) {
+func BenchmarkReadSequential_100Concurrency_1Buckets_10Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 10, 100)
 }

-func BenchmarkReadSequential_1Buckets_100Items_100Concurrency(b *testing.B) {
+func BenchmarkReadSequential_100Concurrency_1Buckets_100Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 100, 100)
 }

-func BenchmarkReadSequential_1Buckets_1000Items_100Concurrency(b *testing.B) {
+func BenchmarkReadSequential_100Concurrency_1Buckets_1000Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 1000, 100)
 }

-func BenchmarkReadSequential_1Buckets_10000Items_100Concurrency(b *testing.B) {
+func BenchmarkReadSequential_100Concurrency_1Buckets_10000Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 10000, 100)
 }

-func BenchmarkReadSequential_1Buckets_1Items_1000Concurrency(b *testing.B) {
+func BenchmarkReadSequential_1000Concurrency_1Buckets_1Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 1, 1000)
 }

-func BenchmarkReadSequential_1Buckets_10Items_1000Concurrency(b *testing.B) {
+func BenchmarkReadSequential_1000Concurrency_1Buckets_10Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 10, 1000)
 }

-func BenchmarkReadSequential_1Buckets_100Items_1000Concurrency(b *testing.B) {
+func BenchmarkReadSequential_1000Concurrency_1Buckets_100Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 100, 1000)
 }

-func BenchmarkReadSequential_1Buckets_1000Items_1000Concurrency(b *testing.B) {
+func BenchmarkReadSequential_1000Concurrency_1Buckets_1000Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 1000, 1000)
 }

-func BenchmarkReadSequential_1Buckets_10000Items_1000Concurrency(b *testing.B) {
+func BenchmarkReadSequential_1000Concurrency_1Buckets_10000Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 10000, 1000)
 }

-func BenchmarkReadSequential_1Buckets_1Items_10000Concurrency(b *testing.B) {
+func BenchmarkReadSequential_10000Concurrency_1Buckets_1Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 1, 10000)
 }

-func BenchmarkReadSequential_1Buckets_10Items_10000Concurrency(b *testing.B) {
+func BenchmarkReadSequential_10000Concurrency_1Buckets_10Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 10, 10000)
 }

-func BenchmarkReadSequential_1Buckets_100Items_10000Concurrency(b *testing.B) {
+func BenchmarkReadSequential_10000Concurrency_1Buckets_100Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 100, 10000)
 }

-func BenchmarkReadSequential_1Buckets_1000Items_10000Concurrency(b *testing.B) {
+func BenchmarkReadSequential_10000Concurrency_1Buckets_1000Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 1000, 10000)
 }

-func BenchmarkReadSequential_1Buckets_10000Items_10000Concurrency(b *testing.B) {
+func BenchmarkReadSequential_10000Concurrency_1Buckets_10000Items(b *testing.B) {
     benchmarkReadSequential(b, 1, 10000, 10000)
 }

 func benchmark(b *testing.B, readWriteMode, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) {
     withOpenDB(func(db *DB, path string) {
-        if err := generateDB(db, numBuckets, numItemsPerBucket); err != nil {
+        if err := bench.GenerateDB(db, numBuckets, numItemsPerBucket); err != nil {
             b.Fatal(err)
         }
-        NewBenchmark(db, readWriteMode, traversalPattern, parallelism).Run(b)
+        bench.New(db, &bench.Config{
+            ReadWriteMode:    readWriteMode,
+            TraversalPattern: traversalPattern,
+            Parallelism:      parallelism,
+        }).Run(b)
     })
 }

 func benchmarkRead(b *testing.B, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) {
-    benchmark(b, BenchReadMode, traversalPattern, numBuckets, numItemsPerBucket, parallelism)
+    benchmark(b, bench.BenchReadMode, traversalPattern, numBuckets, numItemsPerBucket, parallelism)
 }

 func benchmarkReadSequential(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) {
-    benchmark(b, BenchReadMode, BenchSequentialTraversal, numBuckets, numItemsPerBucket, parallelism)
+    benchmark(b, bench.BenchReadMode, bench.BenchSequentialTraversal, numBuckets, numItemsPerBucket, parallelism)
 }

 func benchmarkReadRandom(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) {
-    benchmark(b, BenchReadMode, BenchRandomTraversal, numBuckets, numItemsPerBucket, parallelism)
-}
-
-// Generate and write data to specified number of buckets/items.
-func generateDB(db *DB, numBuckets, numItemsPerBucket int) error {
-    return db.Update(func(tx *Tx) error {
-        for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ {
-            bucketName := fmt.Sprintf("bucket%08d")
-            tx.CreateBucket([]byte(bucketName))
-            bucket := tx.Bucket([]byte(bucketName))
-            for i := 0; i < numItemsPerBucket; i++ {
-                value := []byte(strings.Repeat("0", 100))
-                bucket.Put([]byte(fmt.Sprintf("key%08d", i)), value)
-            }
-        }
-        return nil
-    })
+    benchmark(b, bench.BenchReadMode, bench.BenchRandomTraversal, numBuckets, numItemsPerBucket, parallelism)
 }

 // Benchmark the performance iterating over a cursor.
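
For reference, a minimal sketch of how the pieces added by this patch fit together outside the test suite. It is illustrative only and not part of the change: the standalone program, the database path, and the bucket/item counts are made up, and error handling is kept to a minimum.

package main

import (
    "fmt"
    "testing"

    "github.com/boltdb/bolt"
    "github.com/boltdb/bolt/bench"
)

func main() {
    // Open a throwaway database (the path is illustrative).
    db, err := bolt.Open("/tmp/bolt-bench.db", 0600)
    if err != nil {
        fmt.Println("open:", err)
        return
    }
    defer db.Close()

    // Seed the database with the new generator: 10 buckets of 1000 items each.
    if err := bench.GenerateDB(db, 10, 1000); err != nil {
        fmt.Println("generate:", err)
        return
    }

    // Build a sequential-read benchmark with 10 concurrent readers and run it
    // through the standard testing harness, the same way cmd/bolt and
    // tx_test.go drive the bench package.
    b := bench.New(db, &bench.Config{
        ReadWriteMode:    bench.BenchReadMode,
        TraversalPattern: bench.BenchSequentialTraversal,
        Parallelism:      10,
    })
    fmt.Println(testing.Benchmark(b.Run))
}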