mirror of https://github.com/etcd-io/bbolt.git

add benchmarks using Benchmark framework

parent 29b7d0a9a4
commit 97bd718b02

bench.go | 71
@@ -3,7 +3,6 @@ package bolt
 import (
 	"errors"
 	"fmt"
-	"os"
 	"sync"
 	"testing"
 )
@@ -16,30 +15,32 @@ const (
 )
 
 type Benchmark struct {
-	InputPath        string
+	db               *DB
 	ReadWriteMode    string
 	TraversalPattern string
 	Parallelism      int
 }
 
-func NewBenchmark(inputPath, readWriteMode, traversalPattern string, parallelism int) *Benchmark {
-	return &Benchmark{inputPath, readWriteMode, traversalPattern, parallelism}
+func NewBenchmark(db *DB, readWriteMode, traversalPattern string, parallelism int) *Benchmark {
+	return &Benchmark{db, readWriteMode, traversalPattern, parallelism}
 }
 
 func (bm *Benchmark) Run(b *testing.B) {
-	// Open the database.
-	db, err := Open(bm.InputPath, 0600)
+	// Read buckets and keys before the benchmark begins so we don't skew the
+	// results.
+	buckets, err := buckets(bm.db)
 	if err != nil {
 		b.Fatalf("error: %+v", err)
-		return
 	}
-	defer db.Close()
 
-	buckets, err := buckets(db, bm.InputPath)
-	if err != nil {
-		b.Fatalf("error: %+v", err)
-	}
+	bucketsWithKeys := make(map[string][]string)
+	for _, bucket := range buckets {
+		keys, err := keys(bm.db, bucket)
+		if err != nil {
+			b.Fatalf("error: %+v", err)
+		}
+		bucketsWithKeys[bucket] = keys
+	}
 
 	b.ResetTimer()
 
@@ -50,7 +51,7 @@ func (bm *Benchmark) Run(b *testing.B) {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			if err := bm.runBuckets(b, db, buckets); err != nil {
+			if err := bm.runBuckets(b, bm.db, bucketsWithKeys); err != nil {
 				b.Fatalf("error: %+v", err)
 			}
 		}()
@@ -60,30 +61,39 @@ func (bm *Benchmark) Run(b *testing.B) {
 }
 
 // Run benchmark(s) for each of the given buckets.
-func (bm *Benchmark) runBuckets(b *testing.B, db *DB, buckets []string) error {
+func (bm *Benchmark) runBuckets(b *testing.B, db *DB, bucketsWithKeys map[string][]string) error {
 	return db.View(func(tx *Tx) error {
-		bucketsCount := len(buckets)
-		for _, bucket := range buckets {
-			c := tx.Bucket([]byte(bucket)).Cursor()
-			count := 0
-			for k, _ := c.First(); k != nil; k, _ = c.Next() {
-				count++
-			}
-			if count != bucketsCount {
-				return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, bucketsCount))
-			}
+		bucketsCount := len(bucketsWithKeys)
+		count := 0
+		for bucket, keys := range bucketsWithKeys {
+			bucket := tx.Bucket([]byte(bucket))
+			if err := bm.runKeys(b, bucket, keys); err != nil {
+				return err
+			}
+			count++
+		}
+		if count != bucketsCount {
+			return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, bucketsCount))
 		}
 		return nil
 	})
 }
 
-func buckets(db *DB, path string) ([]string, error) {
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		return nil, err
-	}
+func (bm *Benchmark) runKeys(b *testing.B, bucket *Bucket, keys []string) error {
+	c := bucket.Cursor()
+	keysCount := len(keys)
+	count := 0
+	for k, _ := c.First(); k != nil; k, _ = c.Next() {
+		count++
+	}
+	if count != keysCount {
+		return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, keysCount))
+	}
+	return nil
+}
 
+func buckets(db *DB) ([]string, error) {
 	buckets := []string{}
 
 	err := db.View(func(tx *Tx) error {
 		// Iterate over each bucket.
 		return tx.ForEach(func(name []byte, _ *Bucket) error {
@@ -91,6 +101,23 @@ func buckets(db *DB, path string) ([]string, error) {
 			return nil
 		})
 	})
 
 	return buckets, err
 }
+
+func keys(db *DB, bucket string) ([]string, error) {
+	keys := []string{}
+	err := db.View(func(tx *Tx) error {
+		// Find bucket.
+		b := tx.Bucket([]byte(bucket))
+		if b == nil {
+			return errors.New(fmt.Sprintf("bucket %+v not found", b))
+		}
+
+		// Iterate over each key.
+		return b.ForEach(func(key, _ []byte) error {
+			keys = append(keys, string(key))
+			return nil
+		})
+	})
+	return keys, err
+}
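Not part of the commit, but for context: after this change a caller opens the database itself and hands the *DB to NewBenchmark, then drives Run under testing.Benchmark. A minimal sketch of that usage; the path, the parallelism value of 10, and the exported BenchReadMode/BenchSequentialTraversal constants (referenced elsewhere in this changeset) are assumptions.

// Illustrative sketch only, not part of the commit: drive the refactored
// Benchmark against a database the caller has opened itself.
package main

import (
	"fmt"
	"log"
	"testing"

	"github.com/boltdb/bolt"
)

func main() {
	// The caller now owns the lifecycle of the database handle.
	db, err := bolt.Open("/tmp/bench.db", 0600) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Assumed constants: BenchReadMode and BenchSequentialTraversal are
	// referenced by this changeset's test helpers.
	bm := bolt.NewBenchmark(db, bolt.BenchReadMode, bolt.BenchSequentialTraversal, 10)
	fmt.Println(testing.Benchmark(bm.Run))
}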
@@ -1,42 +0,0 @@
-package bench
-
-import (
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"os"
-)
-
-type bucketItems map[string]string
-type buckets map[string]bucketItems
-
-type Benchmark struct {
-	buckets buckets
-}
-
-func New(filePath string) (*Benchmark, error) {
-	data := readFromFile(filePath)
-}
-
-func readFromFile(filePath string) (*Benchmark, error) {
-	if _, err := os.Stat(filePath); os.IsNotExist(err) {
-		return nil, err
-	}
-
-	file, err := ioutil.ReadFile(filePath)
-	if err != nil {
-		return nil, err
-	}
-
-	b := new(Benchmark)
-	if err := json.Unmarshal(file, &b.buckets); err != nil {
-		return nil, err
-	}
-
-	return b, nil
-}
-
-func (b *Benchmark) Run() error {
-	fmt.Println("Do things, run benchmarks, tell people...")
-	return nil
-}
@@ -35,7 +35,15 @@ func Bench(inputPath string, readWriteMode string, traversalPattern string, para
 
 	// benchmarks for getting all keys
 
-	b := bolt.NewBenchmark(inputPath, readWriteMode, traversalPattern, parallelism)
+	// Open the database.
+	db, err := bolt.Open(inputPath, 0600)
+	if err != nil {
+		fatalf("error: %+v", err)
+		return
+	}
+	defer db.Close()
+
+	b := bolt.NewBenchmark(db, readWriteMode, traversalPattern, parallelism)
 
 	result := testing.Benchmark(b.Run)
 
@@ -1,32 +1,55 @@
 package main
 
 import (
-	"bufio"
 	"fmt"
-	"os"
-	"strings"
+
+	"github.com/boltdb/bolt"
 )
 
 // Generate data for benchmarks.
-func Generate(numEvents int, destPath string) {
-	f, err := os.Create(destPath)
+func Generate(destPath string, numBuckets, numItems int) {
+	// Open the database.
+	db, err := bolt.Open(destPath, 0600)
+	if err != nil {
+		fatalf("open db:", err)
+		return
+	}
+	defer db.Close()
+
+	for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ {
+		bucketName := fmt.Sprintf("bucket%03d", bucketIndex)
+
+		err = db.Update(func(tx *bolt.Tx) error {
+
+			// Create the bucket if it doesn't exist.
+			if err := tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
+				fatalf("create bucket: %s", err)
+				return nil
+			}
+
+			// Find bucket.
+			b := tx.Bucket([]byte(bucketName))
+			if b == nil {
+				fatalf("bucket not found: %s", bucketName)
+				return nil
+			}
+
+			for i := 0; i < numItems; i++ {
+				key := fmt.Sprintf("key%03d", i)
+				value := fmt.Sprintf("value%03d", i)
+
+				// Set value for a given key.
+				if err := b.Put([]byte(key), []byte(value)); err != nil {
+					return err
+				}
+			}
+
+			return nil
+		})
+	}
 	if err != nil {
 		fatal(err)
-	}
-	defer func() {
-		if err := f.Close(); err != nil {
-			fatal(err)
-		}
-	}()
-	w := bufio.NewWriter(f)
-
-	for i := 0; i < numEvents; i++ {
-		if _, err := w.Write([]byte(fmt.Sprintf("key%d:%s\n", i, strings.Repeat("0", 64)))); err != nil {
-			fatal(err)
-		}
-	}
-
-	if err = w.Flush(); err != nil {
-		fatal(err)
+		return
 	}
 }
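As a quick, illustrative way to sanity-check what this generator writes (buckets named bucket000, bucket001, … holding keys key000… and values value000…), a small reader along these lines could be used; the path and bucket name below are made up for the example.

// Illustrative sketch, not part of the commit: read back one generated bucket
// and count its keys using the public bolt read API.
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/bench.db", 0600) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("bucket000")) // hypothetical bucket name
		if b == nil {
			return fmt.Errorf("bucket000 not found")
		}
		n := 0
		if err := b.ForEach(func(k, v []byte) error {
			n++
			return nil
		}); err != nil {
			return err
		}
		fmt.Printf("bucket000 holds %d keys\n", n)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}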
@@ -1 +0,0 @@
-package main
@@ -1,8 +1,6 @@
 package main
 
 import (
-	"errors"
-	"fmt"
 	"os"
 
 	"github.com/boltdb/bolt"
@@ -10,44 +8,34 @@ import (
 
 // Keys retrieves a list of keys for a given bucket.
 func Keys(path, name string) {
-	keys, err := keys(path, name)
-
-	if err != nil {
+	if _, err := os.Stat(path); os.IsNotExist(err) {
 		fatal(err)
 		return
 	}
 
-	for _, key := range keys {
-		println(key)
-	}
-}
-
-func keys(path, name string) ([]string, error) {
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		return nil, err
-	}
-
 	db, err := bolt.Open(path, 0600)
 	if err != nil {
-		return nil, err
+		fatal(err)
+		return
 	}
 	defer db.Close()
 
-	keys := []string{}
-
 	err = db.View(func(tx *bolt.Tx) error {
 		// Find bucket.
 		b := tx.Bucket([]byte(name))
 		if b == nil {
-			return errors.New(fmt.Sprintf("bucket %+v not found", b))
+			fatalf("bucket not found: %s", name)
+			return nil
 		}
 
 		// Iterate over each key.
 		return b.ForEach(func(key, _ []byte) error {
-			keys = append(keys, string(key))
+			println(string(key))
 			return nil
 		})
 	})
-
-	return keys, err
+	if err != nil {
+		fatal(err)
+		return
+	}
 }
@@ -95,12 +95,16 @@ func NewApp() *cli.App {
 			Name:   "generate",
 			Usage:  "Generate data for benchmarks",
 			Action: func(c *cli.Context) {
-				numEvents, err := strconv.Atoi(c.Args().Get(0))
+				destPath := c.Args().Get(0)
+				numBuckets, err := strconv.Atoi(c.Args().Get(1))
 				if err != nil {
 					fatal(err)
 				}
-				destPath := c.Args().Get(1)
-				Generate(numEvents, destPath)
+				numItems, err := strconv.Atoi(c.Args().Get(2))
+				if err != nil {
+					fatal(err)
+				}
+				Generate(destPath, numBuckets, numItems)
 			},
 		},
 		{
@@ -21,11 +21,6 @@ func Set(path, name, key, value string) {
 	defer db.Close()
 
 	err = db.Update(func(tx *bolt.Tx) error {
-		// Create the bucket if it doesn't exist.
-		if err := tx.CreateBucketIfNotExists([]byte(name)); err != nil {
-			fatalf("create bucket: %s", err)
-			return nil
-		}
 		// Find bucket.
 		b := tx.Bucket([]byte(name))
 
tx_test.go | 158
@@ -266,6 +266,123 @@ func TestTx_OnCommit_Rollback(t *testing.T) {
 	assert.Equal(t, 0, x)
 }
 
+func BenchmarkReadSequential_1Buckets_1Items_1Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 1, 1)
+}
+func BenchmarkReadSequential_1Buckets_10Items_1Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 10, 1)
+}
+func BenchmarkReadSequential_1Buckets_100Items_1Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 100, 1)
+}
+func BenchmarkReadSequential_1Buckets_1000Items_1Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 1000, 1)
+}
+func BenchmarkReadSequential_1Buckets_10000Items_1Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 10000, 1)
+}
+
+func BenchmarkReadSequential_1Buckets_1Items_10Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 1, 10)
+}
+func BenchmarkReadSequential_1Buckets_10Items_10Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 10, 10)
+}
+func BenchmarkReadSequential_1Buckets_100Items_10Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 100, 10)
+}
+func BenchmarkReadSequential_1Buckets_1000Items_10Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 1000, 10)
+}
+func BenchmarkReadSequential_1Buckets_10000Items_10Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 10000, 10)
+}
+
+func BenchmarkReadSequential_1Buckets_1Items_100Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 1, 100)
+}
+func BenchmarkReadSequential_1Buckets_10Items_100Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 10, 100)
+}
+func BenchmarkReadSequential_1Buckets_100Items_100Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 100, 100)
+}
+func BenchmarkReadSequential_1Buckets_1000Items_100Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 1000, 100)
+}
+func BenchmarkReadSequential_1Buckets_10000Items_100Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 10000, 100)
+}
+
+func BenchmarkReadSequential_1Buckets_1Items_1000Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 1, 1000)
+}
+func BenchmarkReadSequential_1Buckets_10Items_1000Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 10, 1000)
+}
+func BenchmarkReadSequential_1Buckets_100Items_1000Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 100, 1000)
+}
+func BenchmarkReadSequential_1Buckets_1000Items_1000Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 1000, 1000)
+}
+func BenchmarkReadSequential_1Buckets_10000Items_1000Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 10000, 1000)
+}
+
+func BenchmarkReadSequential_1Buckets_1Items_10000Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 1, 10000)
+}
+func BenchmarkReadSequential_1Buckets_10Items_10000Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 10, 10000)
+}
+func BenchmarkReadSequential_1Buckets_100Items_10000Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 100, 10000)
+}
+func BenchmarkReadSequential_1Buckets_1000Items_10000Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 1000, 10000)
+}
+func BenchmarkReadSequential_1Buckets_10000Items_10000Concurrency(b *testing.B) {
+	benchmarkReadSequential(b, 1, 10000, 10000)
+}
+
+func benchmark(b *testing.B, readWriteMode, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) {
+	withOpenDB(func(db *DB, path string) {
+		if err := generateDB(db, numBuckets, numItemsPerBucket); err != nil {
+			b.Fatal(err)
+		}
+		NewBenchmark(db, readWriteMode, traversalPattern, parallelism).Run(b)
+	})
+}
+
+func benchmarkRead(b *testing.B, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) {
+	benchmark(b, BenchReadMode, traversalPattern, numBuckets, numItemsPerBucket, parallelism)
+}
+
+func benchmarkReadSequential(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) {
+	benchmark(b, BenchReadMode, BenchSequentialTraversal, numBuckets, numItemsPerBucket, parallelism)
+}
+
+func benchmarkReadRandom(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) {
+	benchmark(b, BenchReadMode, BenchRandomTraversal, numBuckets, numItemsPerBucket, parallelism)
+}
+
+// Generate and write data to specified number of buckets/items.
+func generateDB(db *DB, numBuckets, numItemsPerBucket int) error {
+	return db.Update(func(tx *Tx) error {
+		for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ {
+			bucketName := fmt.Sprintf("bucket%08d", bucketIndex)
+			tx.CreateBucket([]byte(bucketName))
+			bucket := tx.Bucket([]byte(bucketName))
+			for i := 0; i < numItemsPerBucket; i++ {
+				value := []byte(strings.Repeat("0", 100))
+				bucket.Put([]byte(fmt.Sprintf("key%08d", i)), value)
+			}
+		}
+		return nil
+	})
+}
+
 // Benchmark the performance iterating over a cursor.
 func BenchmarkTxCursor1(b *testing.B)     { benchmarkTxCursor(b, 1) }
 func BenchmarkTxCursor10(b *testing.B)    { benchmarkTxCursor(b, 10) }
@@ -365,47 +482,6 @@ func benchmarkTxPutSequential(b *testing.B, total int) {
 	})
 }
 
-// func BenchmarkParallel_1items_1threads(b *testing.B) { benchmarkParallel(1, 1) }
-// func BenchmarkParallel_1items_10threads(b *testing.B) { benchmarkParallel(1, 10) }
-// func BenchmarkParallel_1items_100threads(b *testing.B) { benchmarkParallel(1, 100) }
-// func BenchmarkParallel_1items_1000threads(b *testing.B) { benchmarkParallel(1, 1000) }
-
-// func BenchmarkParallel_10items_1threads(b *testing.B) { benchmarkParallel(10, 1) }
-// func BenchmarkParallel_10items_10threads(b *testing.B) { benchmarkParallel(10, 10) }
-// func BenchmarkParallel_10items_100threads(b *testing.B) { benchmarkParallel(10, 100) }
-// func BenchmarkParallel_10items_1000threads(b *testing.B) { benchmarkParallel(10, 1000) }
-
-// func BenchmarkParallel_100items_1threads(b *testing.B) { benchmarkParallel(100, 1) }
-// func BenchmarkParallel_100items_10threads(b *testing.B) { benchmarkParallel(100, 10) }
-// func BenchmarkParallel_100items_100threads(b *testing.B) { benchmarkParallel(100, 100) }
-// func BenchmarkParallel_100items_1000threads(b *testing.B) { benchmarkParallel(100, 1000) }
-
-// func BenchmarkParallel_1000items_1threads(b *testing.B) { benchmarkParallel(1000, 1) }
-// func BenchmarkParallel_1000items_10threads(b *testing.B) { benchmarkParallel(1000, 10) }
-// func BenchmarkParallel_1000items_100threads(b *testing.B) { benchmarkParallel(1000, 100) }
-// func BenchmarkParallel_1000items_1000threads(b *testing.B) { benchmarkParallel(1000, 1000) }
-
-// func benchmarkParallel(b *testing.B, itemCount, parallelism int) {
-// 	// Setup database.
-// 	for i := 0; i < itemCount; i++ {
-// 		// ... insert key/values here ...
-// 	}
-// 	b.ResetTimer()
-
-// 	// Keep running a fixed number of parallel reads until we run out of time.
-// 	for i := 0; i < b.N; i++ {
-// 		var wg sync.WaitGroup
-// 		for j := 0; j < parallelism; j++ {
-// 			wg.Add(1)
-// 			go func() {
-// 				// ... execute read here ...
-// 				wg.Done()
-// 			}()
-// 		}
-// 		wg.Wait()
-// 	}
-// }
-
 func ExampleTx_Rollback() {
 	// Open the database.
 	db, _ := Open(tempfile(), 0666)
 
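The benchmark helpers above rely on withOpenDB, which comes from the package's existing test utilities rather than from this commit. As an assumption-labelled sketch of its likely shape (temporary file, open, callback, cleanup), roughly:

// Rough sketch (assumption) of the withOpenDB helper the benchmarks lean on;
// the real helper lives in the package's existing test files, not in this commit.
package bolt

import (
	"io/ioutil"
	"os"
)

func withOpenDB(fn func(db *DB, path string)) {
	// Create a temporary file to hold the database.
	f, err := ioutil.TempFile("", "bolt-")
	if err != nil {
		panic(err)
	}
	path := f.Name()
	f.Close()
	defer os.Remove(path)

	// Open a DB on it, run the callback, and clean up afterwards.
	db, err := Open(path, 0666)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	fn(db, path)
}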