bench: aggregate adding completed ops for reads

Currently, completed operations are added to the read benchmarks one
at a time, and because each such update is an atomic operation, this
hurts the benchmark's performance. Change the code to update the
counter only once per cycle, with the total number of reads.

Signed-off-by: Ivan Valdes <ivan@vald.es>
This commit is contained in:
Ivan Valdes 2024-04-12 15:14:56 -07:00
parent ee11a09015
commit 43c669db88
No known key found for this signature in database
GPG Key ID: 4037D37741ED0CC5

View File

@ -1380,15 +1380,24 @@ func (cmd *benchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions,
for { for {
numReads := int64(0) numReads := int64(0)
err := func() error {
defer func() { results.AddCompletedOps(numReads) }()
c := tx.Bucket(benchBucketName).Cursor() c := tx.Bucket(benchBucketName).Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() { for k, v := c.First(); k != nil; k, v = c.Next() {
numReads++ numReads++
results.AddCompletedOps(1)
if v == nil { if v == nil {
return ErrInvalidValue return ErrInvalidValue
} }
} }
return nil
}()
if err != nil {
return err
}
if options.WriteMode == "seq" && numReads != options.Iterations { if options.WriteMode == "seq" && numReads != options.Iterations {
return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, numReads) return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, numReads)
} }
@ -1409,16 +1418,25 @@ func (cmd *benchCommand) runReadsRandom(db *bolt.DB, options *BenchOptions, keys
for { for {
numReads := int64(0) numReads := int64(0)
err := func() error {
defer func() { results.AddCompletedOps(numReads) }()
b := tx.Bucket(benchBucketName) b := tx.Bucket(benchBucketName)
for _, key := range keys { for _, key := range keys {
v := b.Get(key.key) v := b.Get(key.key)
numReads++ numReads++
results.AddCompletedOps(1)
if v == nil { if v == nil {
return ErrInvalidValue return ErrInvalidValue
} }
} }
return nil
}()
if err != nil {
return err
}
if options.WriteMode == "seq" && numReads != options.Iterations { if options.WriteMode == "seq" && numReads != options.Iterations {
return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, numReads) return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, numReads)
} }
@ -1441,11 +1459,11 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt
numReads := int64(0) numReads := int64(0)
var top = tx.Bucket(benchBucketName) var top = tx.Bucket(benchBucketName)
if err := top.ForEach(func(name, _ []byte) error { if err := top.ForEach(func(name, _ []byte) error {
defer func() { results.AddCompletedOps(numReads) }()
if b := top.Bucket(name); b != nil { if b := top.Bucket(name); b != nil {
c := b.Cursor() c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() { for k, v := c.First(); k != nil; k, v = c.Next() {
numReads++ numReads++
results.AddCompletedOps(1)
if v == nil { if v == nil {
return ErrInvalidValue return ErrInvalidValue
} }
@ -1476,18 +1494,27 @@ func (cmd *benchCommand) runReadsRandomNested(db *bolt.DB, options *BenchOptions
for { for {
numReads := int64(0) numReads := int64(0)
err := func() error {
defer func() { results.AddCompletedOps(numReads) }()
var top = tx.Bucket(benchBucketName) var top = tx.Bucket(benchBucketName)
for _, nestedKey := range nestedKeys { for _, nestedKey := range nestedKeys {
if b := top.Bucket(nestedKey.bucket); b != nil { if b := top.Bucket(nestedKey.bucket); b != nil {
v := b.Get(nestedKey.key) v := b.Get(nestedKey.key)
numReads++ numReads++
results.AddCompletedOps(1)
if v == nil { if v == nil {
return ErrInvalidValue return ErrInvalidValue
} }
} }
} }
return nil
}()
if err != nil {
return err
}
if options.WriteMode == "seq-nest" && numReads != options.Iterations { if options.WriteMode == "seq-nest" && numReads != options.Iterations {
return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, numReads) return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, numReads)
} }