mirror of https://github.com/etcd-io/bbolt.git
Add --batch-size to 'bolt bench'.

This commit adds a --batch-size CLI argument to the 'bolt bench' tool. The argument makes the benchmark insert into Bolt in smaller batches, which is a more typical use case. /cc @snormore

pull/34/head
parent b6135c2c95
commit cabb44e01f
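
As a rough usage sketch (the counts below are illustrative, and any other flags the command needs, such as the database path, are omitted), the batched benchmark might be invoked as:

    bolt bench --count 100000 --batch-size 1000

which writes 100,000 sequential keys committed in transactions of 1,000 keys each instead of one large transaction.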

@@ -88,22 +88,41 @@ func benchWrite(db *bolt.DB, options *BenchOptions, results *BenchResults) error
 }
 
 func benchWriteSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
-	results.WriteOps = options.Iterations
+	// Default batch size to iteration count, if not specified.
+	var batchSize, iterations = options.BatchSize, options.Iterations
+	if batchSize == 0 {
+		batchSize = iterations
+	}
 
-	return db.Update(func(tx *bolt.Tx) error {
-		b, _ := tx.CreateBucketIfNotExists(benchBucketName)
+	// Insert in batches.
+	var count int
+	for i := 0; i < (iterations/batchSize)+1; i++ {
+		err := db.Update(func(tx *bolt.Tx) error {
+			b, _ := tx.CreateBucketIfNotExists(benchBucketName)
 
-		for i := 0; i < options.Iterations; i++ {
-			var key = make([]byte, options.KeySize)
-			var value = make([]byte, options.ValueSize)
-			binary.BigEndian.PutUint32(key, uint32(i))
-			if err := b.Put(key, value); err != nil {
-				return err
-			}
-		}
+			for j := 0; j < batchSize && count < iterations; j++ {
+				var key = make([]byte, options.KeySize)
+				var value = make([]byte, options.ValueSize)
+				binary.BigEndian.PutUint32(key, uint32(count))
+
+				if err := b.Put(key, value); err != nil {
+					return err
+				}
+
+				count++
+			}
 
-		return nil
-	})
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	// Update the write op count.
+	results.WriteOps = count
+
+	return nil
 }
 
 // Reads from the database.
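
A note on the loop bounds above: (iterations/batchSize)+1 uses integer division to round the transaction count up, and the count < iterations guard stops the inner loop once every key has been written. With --count 1000 and --batch-size 300, for example, the outer loop runs four times and commits batches of 300, 300, 300, and 100 keys; when the batch size divides the count evenly, the extra final transaction simply commits no puts.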

@@ -213,6 +232,7 @@ type BenchOptions struct {
 	WriteMode  string
 	ReadMode   string
 	Iterations int
+	BatchSize  int
 	KeySize    int
 	ValueSize  int
 	CPUProfile string

@@ -98,6 +98,7 @@ func NewApp() *cli.App {
			&cli.StringFlag{Name: "write-mode", Value: "seq", Usage: "Write mode"},
			&cli.StringFlag{Name: "read-mode", Value: "seq", Usage: "Read mode"},
			&cli.IntFlag{Name: "count", Value: 1000, Usage: "Item count"},
+			&cli.IntFlag{Name: "batch-size", Usage: "Insert batch size"},
			&cli.IntFlag{Name: "key-size", Value: 8, Usage: "Key size"},
			&cli.IntFlag{Name: "value-size", Value: 32, Usage: "Value size"},
			&cli.StringFlag{Name: "cpuprofile", Usage: "CPU profile output path"},

@@ -110,6 +111,7 @@ func NewApp() *cli.App {
				WriteMode:  c.String("write-mode"),
				ReadMode:   c.String("read-mode"),
				Iterations: c.Int("count"),
+				BatchSize:  c.Int("batch-size"),
				KeySize:    c.Int("key-size"),
				ValueSize:  c.Int("value-size"),
				CPUProfile: c.String("cpuprofile"),
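
One wiring detail worth noting: the batch-size IntFlag is declared without a Value, so leaving the flag off the command line should yield 0 from c.Int("batch-size"); that zero BatchSize is what makes benchWriteSequential fall back to batchSize = iterations, preserving the previous single-transaction behavior.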