Merge pull request #234 from benbjohnson/testing

Testing cleanup
This commit is contained in:
Ben Johnson 2014-07-26 19:47:10 -06:00
commit defbfd35af
24 changed files with 1921 additions and 1838 deletions

36
bolt_test.go Normal file
View File

@ -0,0 +1,36 @@
package bolt_test
import (
"fmt"
"path/filepath"
"reflect"
"runtime"
"testing"
)
// assert fails the test if the condition is false.
// msg is a Printf-style format applied to v; the reported file:line is the caller's.
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
	if condition {
		return
	}
	// Caller(1) points the failure at the calling test, not at this helper.
	_, callerFile, callerLine, _ := runtime.Caller(1)
	args := append([]interface{}{filepath.Base(callerFile), callerLine}, v...)
	fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", args...)
	tb.FailNow()
}
// ok fails the test if an err is not nil.
// The failure is attributed to the caller's file:line via runtime.Caller(1).
func ok(tb testing.TB, err error) {
	if err == nil {
		return
	}
	_, callerFile, callerLine, _ := runtime.Caller(1)
	fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(callerFile), callerLine, err.Error())
	tb.FailNow()
}
// equals fails the test if exp is not equal to act (deep equality via reflect.DeepEqual).
// The failure is attributed to the caller's file:line via runtime.Caller(1).
func equals(tb testing.TB, exp, act interface{}) {
	if reflect.DeepEqual(exp, act) {
		return
	}
	_, callerFile, callerLine, _ := runtime.Caller(1)
	fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(callerFile), callerLine, exp, act)
	tb.FailNow()
}

View File

@ -634,7 +634,7 @@ func (b *Bucket) free() {
var tx = b.tx var tx = b.tx
b.forEachPageNode(func(p *page, n *node, _ int) { b.forEachPageNode(func(p *page, n *node, _ int) {
if p != nil { if p != nil {
tx.db.freelist.free(tx.id(), p) tx.db.freelist.free(tx.meta.txid, p)
} else { } else {
n.free() n.free()
} }

File diff suppressed because it is too large Load Diff

View File

@ -283,7 +283,7 @@ func benchStartProfiling(options *BenchOptions) {
if options.CPUProfile != "" { if options.CPUProfile != "" {
cpuprofile, err = os.Create(options.CPUProfile) cpuprofile, err = os.Create(options.CPUProfile)
if err != nil { if err != nil {
fatal("bench: could not create cpu profile %q: %v", options.CPUProfile, err) fatalf("bench: could not create cpu profile %q: %v", options.CPUProfile, err)
} }
pprof.StartCPUProfile(cpuprofile) pprof.StartCPUProfile(cpuprofile)
} }
@ -292,7 +292,7 @@ func benchStartProfiling(options *BenchOptions) {
if options.MemProfile != "" { if options.MemProfile != "" {
memprofile, err = os.Create(options.MemProfile) memprofile, err = os.Create(options.MemProfile)
if err != nil { if err != nil {
fatal("bench: could not create memory profile %q: %v", options.MemProfile, err) fatalf("bench: could not create memory profile %q: %v", options.MemProfile, err)
} }
runtime.MemProfileRate = 4096 runtime.MemProfileRate = 4096
} }
@ -301,7 +301,7 @@ func benchStartProfiling(options *BenchOptions) {
if options.BlockProfile != "" { if options.BlockProfile != "" {
blockprofile, err = os.Create(options.BlockProfile) blockprofile, err = os.Create(options.BlockProfile)
if err != nil { if err != nil {
fatal("bench: could not create block profile %q: %v", options.BlockProfile, err) fatalf("bench: could not create block profile %q: %v", options.BlockProfile, err)
} }
runtime.SetBlockProfileRate(1) runtime.SetBlockProfileRate(1)
} }

View File

@ -5,7 +5,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a list of buckets can be retrieved. // Ensure that a list of buckets can be retrieved.
@ -20,7 +19,7 @@ func TestBuckets(t *testing.T) {
}) })
db.Close() db.Close()
output := run("buckets", path) output := run("buckets", path)
assert.Equal(t, "whatchits\nwidgets\nwoojits", output) equals(t, "whatchits\nwidgets\nwoojits", output)
}) })
} }
@ -28,5 +27,5 @@ func TestBuckets(t *testing.T) {
func TestBucketsDBNotFound(t *testing.T) { func TestBucketsDBNotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("buckets", "no/such/db") output := run("buckets", "no/such/db")
assert.Equal(t, "stat no/such/db: no such file or directory", output) equals(t, "stat no/such/db: no such file or directory", output)
} }

View File

@ -42,7 +42,7 @@ func Export(path string) {
// Encode all buckets into JSON. // Encode all buckets into JSON.
output, err := json.Marshal(root) output, err := json.Marshal(root)
if err != nil { if err != nil {
return fmt.Errorf("encode: ", err) return fmt.Errorf("encode: %s", err)
} }
print(string(output)) print(string(output))
return nil return nil

View File

@ -5,7 +5,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a database can be exported. // Ensure that a database can be exported.
@ -32,7 +31,7 @@ func TestExport(t *testing.T) {
}) })
db.Close() db.Close()
output := run("export", path) output := run("export", path)
assert.Equal(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output) equals(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output)
}) })
} }
@ -40,5 +39,5 @@ func TestExport(t *testing.T) {
func TestExport_NotFound(t *testing.T) { func TestExport_NotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("export", "no/such/db") output := run("export", "no/such/db")
assert.Equal(t, "stat no/such/db: no such file or directory", output) equals(t, "stat no/such/db: no such file or directory", output)
} }

View File

@ -5,7 +5,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a value can be retrieved from the CLI. // Ensure that a value can be retrieved from the CLI.
@ -19,7 +18,7 @@ func TestGet(t *testing.T) {
}) })
db.Close() db.Close()
output := run("get", path, "widgets", "foo") output := run("get", path, "widgets", "foo")
assert.Equal(t, "bar", output) equals(t, "bar", output)
}) })
} }
@ -27,7 +26,7 @@ func TestGet(t *testing.T) {
func TestGetDBNotFound(t *testing.T) { func TestGetDBNotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("get", "no/such/db", "widgets", "foo") output := run("get", "no/such/db", "widgets", "foo")
assert.Equal(t, "stat no/such/db: no such file or directory", output) equals(t, "stat no/such/db: no such file or directory", output)
} }
// Ensure that an error is reported if the bucket is not found. // Ensure that an error is reported if the bucket is not found.
@ -36,7 +35,7 @@ func TestGetBucketNotFound(t *testing.T) {
open(func(db *bolt.DB, path string) { open(func(db *bolt.DB, path string) {
db.Close() db.Close()
output := run("get", path, "widgets", "foo") output := run("get", path, "widgets", "foo")
assert.Equal(t, "bucket not found: widgets", output) equals(t, "bucket not found: widgets", output)
}) })
} }
@ -50,6 +49,6 @@ func TestGetKeyNotFound(t *testing.T) {
}) })
db.Close() db.Close()
output := run("get", path, "widgets", "foo") output := run("get", path, "widgets", "foo")
assert.Equal(t, "key not found: foo", output) equals(t, "key not found: foo", output)
}) })
} }

View File

@ -6,7 +6,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a database can be imported. // Ensure that a database can be imported.
@ -15,32 +14,30 @@ func TestImport(t *testing.T) {
// Write input file. // Write input file.
input := tempfile() input := tempfile()
assert.NoError(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600)) ok(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600))
// Import database. // Import database.
path := tempfile() path := tempfile()
output := run("import", path, "--input", input) output := run("import", path, "--input", input)
assert.Equal(t, ``, output) equals(t, ``, output)
// Open database and verify contents. // Open database and verify contents.
db, err := bolt.Open(path, 0600, nil) db, err := bolt.Open(path, 0600, nil)
assert.NoError(t, err) ok(t, err)
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket([]byte("empty"))) assert(t, tx.Bucket([]byte("empty")) != nil, "")
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
if assert.NotNil(t, b) { assert(t, b != nil, "")
assert.Equal(t, []byte("0000"), b.Get([]byte("foo"))) equals(t, []byte("0000"), b.Get([]byte("foo")))
assert.Equal(t, []byte(""), b.Get([]byte("bar"))) equals(t, []byte(""), b.Get([]byte("bar")))
}
b = tx.Bucket([]byte("woojits")) b = tx.Bucket([]byte("woojits"))
if assert.NotNil(t, b) { assert(t, b != nil, "")
assert.Equal(t, []byte("XXXX"), b.Get([]byte("baz"))) equals(t, []byte("XXXX"), b.Get([]byte("baz")))
b = b.Bucket([]byte("woojits/subbucket")) b = b.Bucket([]byte("woojits/subbucket"))
assert.Equal(t, []byte("A"), b.Get([]byte("bat"))) equals(t, []byte("A"), b.Get([]byte("bat")))
}
return nil return nil
}) })
@ -51,5 +48,5 @@ func TestImport(t *testing.T) {
func TestImport_NotFound(t *testing.T) { func TestImport_NotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("import", "path/to/db", "--input", "no/such/file") output := run("import", "path/to/db", "--input", "no/such/file")
assert.Equal(t, "open no/such/file: no such file or directory", output) equals(t, "open no/such/file: no such file or directory", output)
} }

View File

@ -5,7 +5,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a database info can be printed. // Ensure that a database info can be printed.
@ -20,7 +19,7 @@ func TestInfo(t *testing.T) {
}) })
db.Close() db.Close()
output := run("info", path) output := run("info", path)
assert.Equal(t, `Page Size: 4096`, output) equals(t, `Page Size: 4096`, output)
}) })
} }
@ -28,5 +27,5 @@ func TestInfo(t *testing.T) {
func TestInfo_NotFound(t *testing.T) { func TestInfo_NotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("info", "no/such/db") output := run("info", "no/such/db")
assert.Equal(t, "stat no/such/db: no such file or directory", output) equals(t, "stat no/such/db: no such file or directory", output)
} }

View File

@ -5,7 +5,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a list of keys can be retrieved for a given bucket. // Ensure that a list of keys can be retrieved for a given bucket.
@ -21,7 +20,7 @@ func TestKeys(t *testing.T) {
}) })
db.Close() db.Close()
output := run("keys", path, "widgets") output := run("keys", path, "widgets")
assert.Equal(t, "0001\n0002\n0003", output) equals(t, "0001\n0002\n0003", output)
}) })
} }
@ -29,7 +28,7 @@ func TestKeys(t *testing.T) {
func TestKeysDBNotFound(t *testing.T) { func TestKeysDBNotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("keys", "no/such/db", "widgets") output := run("keys", "no/such/db", "widgets")
assert.Equal(t, "stat no/such/db: no such file or directory", output) equals(t, "stat no/such/db: no such file or directory", output)
} }
// Ensure that an error is reported if the bucket is not found. // Ensure that an error is reported if the bucket is not found.
@ -38,6 +37,6 @@ func TestKeysBucketNotFound(t *testing.T) {
open(func(db *bolt.DB, path string) { open(func(db *bolt.DB, path string) {
db.Close() db.Close()
output := run("keys", path, "widgets") output := run("keys", path, "widgets")
assert.Equal(t, "bucket not found: widgets", output) equals(t, "bucket not found: widgets", output)
}) })
} }

View File

@ -1,9 +1,14 @@
package main_test package main_test
import ( import (
"fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath"
"reflect"
"runtime"
"strings" "strings"
"testing"
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
@ -35,3 +40,30 @@ func tempfile() string {
os.Remove(f.Name()) os.Remove(f.Name())
return f.Name() return f.Name()
} }
// assert fails the test if the condition is false, formatting msg with v.
// runtime.Caller(1) attributes the failure to the calling test line.
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
	if !condition {
		_, file, line, _ := runtime.Caller(1)
		prefix := []interface{}{filepath.Base(file), line}
		fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append(prefix, v...)...)
		tb.FailNow()
	}
}
// ok fails the test if an err is not nil, reporting the caller's file:line.
func ok(tb testing.TB, err error) {
	if err != nil {
		_, file, line, _ := runtime.Caller(1)
		msg := err.Error()
		fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, msg)
		tb.FailNow()
	}
}
// equals fails the test if exp is not deeply equal to act, printing both
// values in Go syntax (%#v) at the caller's file:line.
func equals(tb testing.TB, exp, act interface{}) {
	if !reflect.DeepEqual(exp, act) {
		_, file, line, _ := runtime.Caller(1)
		base := filepath.Base(file)
		fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", base, line, exp, act)
		tb.FailNow()
	}
}

View File

@ -7,7 +7,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
func TestStats(t *testing.T) { func TestStats(t *testing.T) {
@ -40,7 +39,7 @@ func TestStats(t *testing.T) {
}) })
db.Close() db.Close()
output := run("stats", path, "b") output := run("stats", path, "b")
assert.Equal(t, "Aggregate statistics for 2 buckets\n\n"+ equals(t, "Aggregate statistics for 2 buckets\n\n"+
"Page count statistics\n"+ "Page count statistics\n"+
"\tNumber of logical branch pages: 0\n"+ "\tNumber of logical branch pages: 0\n"+
"\tNumber of physical branch overflow pages: 0\n"+ "\tNumber of physical branch overflow pages: 0\n"+

View File

@ -1,4 +1,4 @@
package bolt package bolt_test
import ( import (
"bytes" "bytes"
@ -7,103 +7,104 @@ import (
"testing" "testing"
"testing/quick" "testing/quick"
"github.com/stretchr/testify/assert" "github.com/boltdb/bolt"
) )
// Ensure that a cursor can return a reference to the bucket that created it. // Ensure that a cursor can return a reference to the bucket that created it.
func TestCursor_Bucket(t *testing.T) { func TestCursor_Bucket(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
b, _ := tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
c := b.Cursor() b, _ := tx.CreateBucket([]byte("widgets"))
assert.Equal(t, b, c.Bucket()) c := b.Cursor()
return nil equals(t, b, c.Bucket())
}) return nil
}) })
} }
// Ensure that a Tx cursor can seek to the appropriate keys. // Ensure that a Tx cursor can seek to the appropriate keys.
func TestCursor_Seek(t *testing.T) { func TestCursor_Seek(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
b, err := tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
assert.NoError(t, err) b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, b.Put([]byte("foo"), []byte("0001"))) ok(t, err)
assert.NoError(t, b.Put([]byte("bar"), []byte("0002"))) ok(t, b.Put([]byte("foo"), []byte("0001")))
assert.NoError(t, b.Put([]byte("baz"), []byte("0003"))) ok(t, b.Put([]byte("bar"), []byte("0002")))
_, err = b.CreateBucket([]byte("bkt")) ok(t, b.Put([]byte("baz"), []byte("0003")))
assert.NoError(t, err) _, err = b.CreateBucket([]byte("bkt"))
return nil ok(t, err)
}) return nil
db.View(func(tx *Tx) error { })
c := tx.Bucket([]byte("widgets")).Cursor() db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
// Exact match should go to the key. // Exact match should go to the key.
k, v := c.Seek([]byte("bar")) k, v := c.Seek([]byte("bar"))
assert.Equal(t, []byte("bar"), k) equals(t, []byte("bar"), k)
assert.Equal(t, []byte("0002"), v) equals(t, []byte("0002"), v)
// Inexact match should go to the next key. // Inexact match should go to the next key.
k, v = c.Seek([]byte("bas")) k, v = c.Seek([]byte("bas"))
assert.Equal(t, []byte("baz"), k) equals(t, []byte("baz"), k)
assert.Equal(t, []byte("0003"), v) equals(t, []byte("0003"), v)
// Low key should go to the first key. // Low key should go to the first key.
k, v = c.Seek([]byte("")) k, v = c.Seek([]byte(""))
assert.Equal(t, []byte("bar"), k) equals(t, []byte("bar"), k)
assert.Equal(t, []byte("0002"), v) equals(t, []byte("0002"), v)
// High key should return no key. // High key should return no key.
k, v = c.Seek([]byte("zzz")) k, v = c.Seek([]byte("zzz"))
assert.Nil(t, k) assert(t, k == nil, "")
assert.Nil(t, v) assert(t, v == nil, "")
// Buckets should return their key but no value. // Buckets should return their key but no value.
k, v = c.Seek([]byte("bkt")) k, v = c.Seek([]byte("bkt"))
assert.Equal(t, []byte("bkt"), k) equals(t, []byte("bkt"), k)
assert.Nil(t, v) assert(t, v == nil, "")
return nil return nil
})
}) })
} }
func TestCursor_Delete(t *testing.T) { func TestCursor_Delete(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
var count = 1000 defer db.Close()
// Insert every other key between 0 and $count. var count = 1000
db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucket([]byte("widgets")) // Insert every other key between 0 and $count.
for i := 0; i < count; i += 1 { db.Update(func(tx *bolt.Tx) error {
k := make([]byte, 8) b, _ := tx.CreateBucket([]byte("widgets"))
binary.BigEndian.PutUint64(k, uint64(i)) for i := 0; i < count; i += 1 {
b.Put(k, make([]byte, 100)) k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(i))
b.Put(k, make([]byte, 100))
}
b.CreateBucket([]byte("sub"))
return nil
})
db.Update(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
bound := make([]byte, 8)
binary.BigEndian.PutUint64(bound, uint64(count/2))
for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
if err := c.Delete(); err != nil {
return err
} }
b.CreateBucket([]byte("sub")) }
return nil c.Seek([]byte("sub"))
}) err := c.Delete()
equals(t, err, bolt.ErrIncompatibleValue)
return nil
})
db.Update(func(tx *Tx) error { db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor() b := tx.Bucket([]byte("widgets"))
bound := make([]byte, 8) equals(t, b.Stats().KeyN, count/2+1)
binary.BigEndian.PutUint64(bound, uint64(count/2)) return nil
for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
if err := c.Delete(); err != nil {
return err
}
}
c.Seek([]byte("sub"))
err := c.Delete()
assert.Equal(t, err, ErrIncompatibleValue)
return nil
})
db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
assert.Equal(t, b.Stats().KeyN, count/2+1)
return nil
})
}) })
} }
@ -113,216 +114,223 @@ func TestCursor_Delete(t *testing.T) {
// //
// Related: https://github.com/boltdb/bolt/pull/187 // Related: https://github.com/boltdb/bolt/pull/187
func TestCursor_Seek_Large(t *testing.T) { func TestCursor_Seek_Large(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
var count = 10000 defer db.Close()
// Insert every other key between 0 and $count. var count = 10000
db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucket([]byte("widgets")) // Insert every other key between 0 and $count.
for i := 0; i < count; i += 100 { db.Update(func(tx *bolt.Tx) error {
for j := i; j < i+100; j += 2 { b, _ := tx.CreateBucket([]byte("widgets"))
k := make([]byte, 8) for i := 0; i < count; i += 100 {
binary.BigEndian.PutUint64(k, uint64(j)) for j := i; j < i+100; j += 2 {
b.Put(k, make([]byte, 100)) k := make([]byte, 8)
} binary.BigEndian.PutUint64(k, uint64(j))
b.Put(k, make([]byte, 100))
} }
return nil }
}) return nil
})
db.View(func(tx *Tx) error { db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor() c := tx.Bucket([]byte("widgets")).Cursor()
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
seek := make([]byte, 8) seek := make([]byte, 8)
binary.BigEndian.PutUint64(seek, uint64(i)) binary.BigEndian.PutUint64(seek, uint64(i))
k, _ := c.Seek(seek) k, _ := c.Seek(seek)
// The last seek is beyond the end of the range so // it should return nil.
// it should return nil. // it should return nil.
if i == count-1 { if i == count-1 {
assert.Nil(t, k) assert(t, k == nil, "")
continue continue
}
// Otherwise we should seek to the exact key or the next key.
num := binary.BigEndian.Uint64(k)
if i%2 == 0 {
assert.Equal(t, uint64(i), num)
} else {
assert.Equal(t, uint64(i+1), num)
}
} }
return nil // Otherwise we should seek to the exact key or the next key.
}) num := binary.BigEndian.Uint64(k)
if i%2 == 0 {
equals(t, uint64(i), num)
} else {
equals(t, uint64(i+1), num)
}
}
return nil
}) })
} }
// Ensure that a cursor can iterate over an empty bucket without error. // Ensure that a cursor can iterate over an empty bucket without error.
func TestCursor_EmptyBucket(t *testing.T) { func TestCursor_EmptyBucket(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
_, err := tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
return err _, err := tx.CreateBucket([]byte("widgets"))
}) return err
db.View(func(tx *Tx) error { })
c := tx.Bucket([]byte("widgets")).Cursor() db.View(func(tx *bolt.Tx) error {
k, v := c.First() c := tx.Bucket([]byte("widgets")).Cursor()
assert.Nil(t, k) k, v := c.First()
assert.Nil(t, v) assert(t, k == nil, "")
return nil assert(t, v == nil, "")
}) return nil
}) })
} }
// Ensure that a Tx cursor can reverse iterate over an empty bucket without error. // Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
func TestCursor_EmptyBucketReverse(t *testing.T) { func TestCursor_EmptyBucketReverse(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
_, err := tx.CreateBucket([]byte("widgets"))
return err db.Update(func(tx *bolt.Tx) error {
}) _, err := tx.CreateBucket([]byte("widgets"))
db.View(func(tx *Tx) error { return err
c := tx.Bucket([]byte("widgets")).Cursor() })
k, v := c.Last() db.View(func(tx *bolt.Tx) error {
assert.Nil(t, k) c := tx.Bucket([]byte("widgets")).Cursor()
assert.Nil(t, v) k, v := c.Last()
return nil assert(t, k == nil, "")
}) assert(t, v == nil, "")
return nil
}) })
} }
// Ensure that a Tx cursor can iterate over a single root with a couple elements. // Ensure that a Tx cursor can iterate over a single root with a couple elements.
func TestCursor_Iterate_Leaf(t *testing.T) { func TestCursor_Iterate_Leaf(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
return nil
})
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.First() db.Update(func(tx *bolt.Tx) error {
assert.Equal(t, string(k), "bar") tx.CreateBucket([]byte("widgets"))
assert.Equal(t, v, []byte{1}) tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
k, v = c.Next() tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
assert.Equal(t, string(k), "baz") return nil
assert.Equal(t, v, []byte{})
k, v = c.Next()
assert.Equal(t, string(k), "foo")
assert.Equal(t, v, []byte{0})
k, v = c.Next()
assert.Nil(t, k)
assert.Nil(t, v)
k, v = c.Next()
assert.Nil(t, k)
assert.Nil(t, v)
tx.Rollback()
}) })
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.First()
equals(t, string(k), "bar")
equals(t, v, []byte{1})
k, v = c.Next()
equals(t, string(k), "baz")
equals(t, v, []byte{})
k, v = c.Next()
equals(t, string(k), "foo")
equals(t, v, []byte{0})
k, v = c.Next()
assert(t, k == nil, "")
assert(t, v == nil, "")
k, v = c.Next()
assert(t, k == nil, "")
assert(t, v == nil, "")
tx.Rollback()
} }
// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. // Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
func TestCursor_LeafRootReverse(t *testing.T) { func TestCursor_LeafRootReverse(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
return nil
})
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.Last() db.Update(func(tx *bolt.Tx) error {
assert.Equal(t, string(k), "foo") tx.CreateBucket([]byte("widgets"))
assert.Equal(t, v, []byte{0}) tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
k, v = c.Prev() tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
assert.Equal(t, string(k), "baz") return nil
assert.Equal(t, v, []byte{})
k, v = c.Prev()
assert.Equal(t, string(k), "bar")
assert.Equal(t, v, []byte{1})
k, v = c.Prev()
assert.Nil(t, k)
assert.Nil(t, v)
k, v = c.Prev()
assert.Nil(t, k)
assert.Nil(t, v)
tx.Rollback()
}) })
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.Last()
equals(t, string(k), "foo")
equals(t, v, []byte{0})
k, v = c.Prev()
equals(t, string(k), "baz")
equals(t, v, []byte{})
k, v = c.Prev()
equals(t, string(k), "bar")
equals(t, v, []byte{1})
k, v = c.Prev()
assert(t, k == nil, "")
assert(t, v == nil, "")
k, v = c.Prev()
assert(t, k == nil, "")
assert(t, v == nil, "")
tx.Rollback()
} }
// Ensure that a Tx cursor can restart from the beginning. // Ensure that a Tx cursor can restart from the beginning.
func TestCursor_Restart(t *testing.T) { func TestCursor_Restart(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
return nil
})
tx, _ := db.Begin(false) db.Update(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor() tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
k, _ := c.First() tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
assert.Equal(t, string(k), "bar") return nil
k, _ = c.Next()
assert.Equal(t, string(k), "foo")
k, _ = c.First()
assert.Equal(t, string(k), "bar")
k, _ = c.Next()
assert.Equal(t, string(k), "foo")
tx.Rollback()
}) })
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
k, _ := c.First()
equals(t, string(k), "bar")
k, _ = c.Next()
equals(t, string(k), "foo")
k, _ = c.First()
equals(t, string(k), "bar")
k, _ = c.Next()
equals(t, string(k), "foo")
tx.Rollback()
} }
// Ensure that a Tx can iterate over all elements in a bucket. // Ensure that a Tx can iterate over all elements in a bucket.
func TestCursor_QuickCheck(t *testing.T) { func TestCursor_QuickCheck(t *testing.T) {
f := func(items testdata) bool { f := func(items testdata) bool {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
// Bulk insert all values. defer db.Close()
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
assert.NoError(t, b.Put(item.Key, item.Value))
}
assert.NoError(t, tx.Commit())
// Sort test data. // Bulk insert all values.
sort.Sort(items) tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
ok(t, b.Put(item.Key, item.Value))
}
ok(t, tx.Commit())
// Sort test data.
sort.Sort(items)
// Iterate over all items and check consistency.
var index = 0
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
equals(t, k, items[index].Key)
equals(t, v, items[index].Value)
index++
}
equals(t, len(items), index)
tx.Rollback()
// Iterate over all items and check consistency.
var index = 0
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
assert.Equal(t, k, items[index].Key)
assert.Equal(t, v, items[index].Value)
index++
}
assert.Equal(t, len(items), index)
tx.Rollback()
})
return true return true
} }
if err := quick.Check(f, qconfig()); err != nil { if err := quick.Check(f, qconfig()); err != nil {
@ -333,31 +341,33 @@ func TestCursor_QuickCheck(t *testing.T) {
// Ensure that a transaction can iterate over all elements in a bucket in reverse. // Ensure that a transaction can iterate over all elements in a bucket in reverse.
func TestCursor_QuickCheck_Reverse(t *testing.T) { func TestCursor_QuickCheck_Reverse(t *testing.T) {
f := func(items testdata) bool { f := func(items testdata) bool {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
// Bulk insert all values. defer db.Close()
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
assert.NoError(t, b.Put(item.Key, item.Value))
}
assert.NoError(t, tx.Commit())
// Sort test data. // Bulk insert all values.
sort.Sort(revtestdata(items)) tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
ok(t, b.Put(item.Key, item.Value))
}
ok(t, tx.Commit())
// Sort test data.
sort.Sort(revtestdata(items))
// Iterate over all items and check consistency.
var index = 0
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
equals(t, k, items[index].Key)
equals(t, v, items[index].Value)
index++
}
equals(t, len(items), index)
tx.Rollback()
// Iterate over all items and check consistency.
var index = 0
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
assert.Equal(t, k, items[index].Key)
assert.Equal(t, v, items[index].Value)
index++
}
assert.Equal(t, len(items), index)
tx.Rollback()
})
return true return true
} }
if err := quick.Check(f, qconfig()); err != nil { if err := quick.Check(f, qconfig()); err != nil {
@ -367,54 +377,56 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
// Ensure that a Tx cursor can iterate over subbuckets. // Ensure that a Tx cursor can iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) db.Update(func(tx *bolt.Tx) error {
_, err = b.CreateBucket([]byte("foo")) b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) ok(t, err)
_, err = b.CreateBucket([]byte("bar")) _, err = b.CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
_, err = b.CreateBucket([]byte("baz")) _, err = b.CreateBucket([]byte("bar"))
assert.NoError(t, err) ok(t, err)
return nil _, err = b.CreateBucket([]byte("baz"))
}) ok(t, err)
db.View(func(tx *Tx) error { return nil
var names []string })
c := tx.Bucket([]byte("widgets")).Cursor() db.View(func(tx *bolt.Tx) error {
for k, v := c.First(); k != nil; k, v = c.Next() { var names []string
names = append(names, string(k)) c := tx.Bucket([]byte("widgets")).Cursor()
assert.Nil(t, v) for k, v := c.First(); k != nil; k, v = c.Next() {
} names = append(names, string(k))
assert.Equal(t, names, []string{"bar", "baz", "foo"}) assert(t, v == nil, "")
return nil }
}) equals(t, names, []string{"bar", "baz", "foo"})
return nil
}) })
} }
// Ensure that a Tx cursor can reverse iterate over subbuckets. // Ensure that a Tx cursor can reverse iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) db.Update(func(tx *bolt.Tx) error {
_, err = b.CreateBucket([]byte("foo")) b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) ok(t, err)
_, err = b.CreateBucket([]byte("bar")) _, err = b.CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
_, err = b.CreateBucket([]byte("baz")) _, err = b.CreateBucket([]byte("bar"))
assert.NoError(t, err) ok(t, err)
return nil _, err = b.CreateBucket([]byte("baz"))
}) ok(t, err)
db.View(func(tx *Tx) error { return nil
var names []string })
c := tx.Bucket([]byte("widgets")).Cursor() db.View(func(tx *bolt.Tx) error {
for k, v := c.Last(); k != nil; k, v = c.Prev() { var names []string
names = append(names, string(k)) c := tx.Bucket([]byte("widgets")).Cursor()
assert.Nil(t, v) for k, v := c.Last(); k != nil; k, v = c.Prev() {
} names = append(names, string(k))
assert.Equal(t, names, []string{"foo", "baz", "bar"}) assert(t, v == nil, "")
return nil }
}) equals(t, names, []string{"foo", "baz", "bar"})
return nil
}) })
} }

4
db.go
View File

@ -384,8 +384,8 @@ func (db *DB) beginRWTx() (*Tx, error) {
// Free any pages associated with closed read-only transactions. // Free any pages associated with closed read-only transactions.
var minid txid = 0xFFFFFFFFFFFFFFFF var minid txid = 0xFFFFFFFFFFFFFFFF
for _, t := range db.txs { for _, t := range db.txs {
if t.id() < minid { if t.meta.txid < minid {
minid = t.id() minid = t.meta.txid
} }
} }
if minid > 0 { if minid > 0 {

View File

@ -1,4 +1,4 @@
package bolt package bolt_test
import ( import (
"errors" "errors"
@ -12,29 +12,28 @@ import (
"strings" "strings"
"testing" "testing"
"time" "time"
"unsafe"
"github.com/stretchr/testify/assert" "github.com/boltdb/bolt"
) )
var statsFlag = flag.Bool("stats", false, "show performance stats") var statsFlag = flag.Bool("stats", false, "show performance stats")
// Ensure that opening a database with a bad path returns an error. // Ensure that opening a database with a bad path returns an error.
func TestOpen_BadPath(t *testing.T) { func TestOpen_BadPath(t *testing.T) {
db, err := Open("", 0666, nil) db, err := bolt.Open("", 0666, nil)
assert.Error(t, err) assert(t, err != nil, "err: %s", err)
assert.Nil(t, db) assert(t, db == nil, "")
} }
// Ensure that a database can be opened without error. // Ensure that a database can be opened without error.
func TestOpen(t *testing.T) { func TestOpen(t *testing.T) {
withTempPath(func(path string) { path := tempfile()
db, err := Open(path, 0666, nil) defer os.Remove(path)
assert.NotNil(t, db) db, err := bolt.Open(path, 0666, nil)
assert.NoError(t, err) assert(t, db != nil, "")
assert.Equal(t, db.Path(), path) ok(t, err)
assert.NoError(t, db.Close()) equals(t, db.Path(), path)
}) ok(t, db.Close())
} }
// Ensure that opening an already open database file will timeout. // Ensure that opening an already open database file will timeout.
@ -42,21 +41,23 @@ func TestOpen_Timeout(t *testing.T) {
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
t.Skip("timeout not supported on windows") t.Skip("timeout not supported on windows")
} }
withTempPath(func(path string) {
// Open a data file.
db0, err := Open(path, 0666, nil)
assert.NotNil(t, db0)
assert.NoError(t, err)
// Attempt to open the database again. path := tempfile()
start := time.Now() defer os.Remove(path)
db1, err := Open(path, 0666, &Options{Timeout: 100 * time.Millisecond})
assert.Nil(t, db1)
assert.Equal(t, ErrTimeout, err)
assert.True(t, time.Since(start) > 100*time.Millisecond)
db0.Close() // Open a data file.
}) db0, err := bolt.Open(path, 0666, nil)
assert(t, db0 != nil, "")
ok(t, err)
// Attempt to open the database again.
start := time.Now()
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond})
assert(t, db1 == nil, "")
equals(t, bolt.ErrTimeout, err)
assert(t, time.Since(start) > 100*time.Millisecond, "")
db0.Close()
} }
// Ensure that opening an already open database file will wait until its closed. // Ensure that opening an already open database file will wait until its closed.
@ -64,48 +65,51 @@ func TestOpen_Wait(t *testing.T) {
if runtime.GOOS == "windows" { if runtime.GOOS == "windows" {
t.Skip("timeout not supported on windows") t.Skip("timeout not supported on windows")
} }
withTempPath(func(path string) {
// Open a data file.
db0, err := Open(path, 0666, nil)
assert.NotNil(t, db0)
assert.NoError(t, err)
// Close it in just a bit. path := tempfile()
time.AfterFunc(100*time.Millisecond, func() { db0.Close() }) defer os.Remove(path)
// Attempt to open the database again. // Open a data file.
start := time.Now() db0, err := bolt.Open(path, 0666, nil)
db1, err := Open(path, 0666, &Options{Timeout: 200 * time.Millisecond}) assert(t, db0 != nil, "")
assert.NotNil(t, db1) ok(t, err)
assert.NoError(t, err)
assert.True(t, time.Since(start) > 100*time.Millisecond) // Close it in just a bit.
}) time.AfterFunc(100*time.Millisecond, func() { db0.Close() })
// Attempt to open the database again.
start := time.Now()
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond})
assert(t, db1 != nil, "")
ok(t, err)
assert(t, time.Since(start) > 100*time.Millisecond, "")
} }
// Ensure that a re-opened database is consistent. // Ensure that a re-opened database is consistent.
func TestOpen_Check(t *testing.T) { func TestOpen_Check(t *testing.T) {
withTempPath(func(path string) { path := tempfile()
db, err := Open(path, 0666, nil) defer os.Remove(path)
assert.NoError(t, err)
assert.NoError(t, db.View(func(tx *Tx) error { return <-tx.Check() }))
db.Close()
db, err = Open(path, 0666, nil) db, err := bolt.Open(path, 0666, nil)
assert.NoError(t, err) ok(t, err)
assert.NoError(t, db.View(func(tx *Tx) error { return <-tx.Check() })) ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() }))
db.Close() db.Close()
})
db, err = bolt.Open(path, 0666, nil)
ok(t, err)
ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() }))
db.Close()
} }
// Ensure that the database returns an error if the file handle cannot be open. // Ensure that the database returns an error if the file handle cannot be open.
func TestDB_Open_FileError(t *testing.T) { func TestDB_Open_FileError(t *testing.T) {
withTempPath(func(path string) { path := tempfile()
_, err := Open(path+"/youre-not-my-real-parent", 0666, nil) defer os.Remove(path)
if err, _ := err.(*os.PathError); assert.Error(t, err) {
assert.Equal(t, path+"/youre-not-my-real-parent", err.Path) _, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil)
assert.Equal(t, "open", err.Op) assert(t, err.(*os.PathError) != nil, "")
} equals(t, path+"/youre-not-my-real-parent", err.(*os.PathError).Path)
}) equals(t, "open", err.(*os.PathError).Op)
} }
// Ensure that write errors to the meta file handler during initialization are returned. // Ensure that write errors to the meta file handler during initialization are returned.
@ -115,218 +119,227 @@ func TestDB_Open_MetaInitWriteError(t *testing.T) {
// Ensure that a database that is too small returns an error. // Ensure that a database that is too small returns an error.
func TestDB_Open_FileTooSmall(t *testing.T) { func TestDB_Open_FileTooSmall(t *testing.T) {
withTempPath(func(path string) { path := tempfile()
db, err := Open(path, 0666, nil) defer os.Remove(path)
assert.NoError(t, err)
db.Close()
// corrupt the database db, err := bolt.Open(path, 0666, nil)
assert.NoError(t, os.Truncate(path, int64(os.Getpagesize()))) ok(t, err)
db.Close()
db, err = Open(path, 0666, nil) // corrupt the database
assert.Equal(t, errors.New("file size too small"), err) ok(t, os.Truncate(path, int64(os.Getpagesize())))
})
db, err = bolt.Open(path, 0666, nil)
equals(t, errors.New("file size too small"), err)
} }
// Ensure that corrupt meta0 page errors get returned. // TODO(benbjohnson): Test corruption at every byte of the first two pages.
func TestDB_Open_CorruptMeta0(t *testing.T) {
withTempPath(func(path string) {
var m meta
m.magic = magic
m.version = version
m.pageSize = 0x8000
// Create a file with bad magic.
b := make([]byte, 0x10000)
p0, p1 := (*page)(unsafe.Pointer(&b[0x0000])), (*page)(unsafe.Pointer(&b[0x8000]))
p0.meta().magic = 0
p0.meta().version = version
p1.meta().magic = magic
p1.meta().version = version
err := ioutil.WriteFile(path, b, 0666)
assert.NoError(t, err)
// Open the database.
_, err = Open(path, 0666, nil)
assert.Equal(t, err, errors.New("meta0 error: invalid database"))
})
}
// Ensure that a corrupt meta page checksum causes the open to fail.
func TestDB_Open_MetaChecksumError(t *testing.T) {
for i := 0; i < 2; i++ {
withTempPath(func(path string) {
db, err := Open(path, 0600, nil)
pageSize := db.pageSize
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("woojits"))
return err
})
db.Close()
// Change a single byte in the meta page.
f, _ := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
f.WriteAt([]byte{1}, int64((i*pageSize)+(pageHeaderSize+12)))
f.Sync()
f.Close()
// Reopen the database.
_, err = Open(path, 0600, nil)
if assert.Error(t, err) {
if i == 0 {
assert.Equal(t, "meta0 error: checksum error", err.Error())
} else {
assert.Equal(t, "meta1 error: checksum error", err.Error())
}
}
})
}
}
// Ensure that a database cannot open a transaction when it's not open. // Ensure that a database cannot open a transaction when it's not open.
func TestDB_Begin_DatabaseNotOpen(t *testing.T) { func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
var db DB var db bolt.DB
tx, err := db.Begin(false) tx, err := db.Begin(false)
assert.Nil(t, tx) assert(t, tx == nil, "")
assert.Equal(t, err, ErrDatabaseNotOpen) equals(t, err, bolt.ErrDatabaseNotOpen)
} }
// Ensure that a read-write transaction can be retrieved. // Ensure that a read-write transaction can be retrieved.
func TestDB_BeginRW(t *testing.T) { func TestDB_BeginRW(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
tx, err := db.Begin(true) defer db.Close()
assert.NotNil(t, tx) tx, err := db.Begin(true)
assert.NoError(t, err) assert(t, tx != nil, "")
assert.Equal(t, tx.DB(), db) ok(t, err)
assert.Equal(t, tx.Writable(), true) assert(t, tx.DB() == db.DB, "")
assert.NoError(t, tx.Commit()) equals(t, tx.Writable(), true)
}) ok(t, tx.Commit())
} }
// Ensure that opening a transaction while the DB is closed returns an error. // Ensure that opening a transaction while the DB is closed returns an error.
func TestDB_BeginRW_Closed(t *testing.T) { func TestDB_BeginRW_Closed(t *testing.T) {
var db DB var db bolt.DB
tx, err := db.Begin(true) tx, err := db.Begin(true)
assert.Equal(t, err, ErrDatabaseNotOpen) equals(t, err, bolt.ErrDatabaseNotOpen)
assert.Nil(t, tx) assert(t, tx == nil, "")
} }
// Ensure a database can provide a transactional block. // Ensure a database can provide a transactional block.
func TestDB_Update(t *testing.T) { func TestDB_Update(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
err := db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets")) err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
b.Put([]byte("foo"), []byte("bar")) b := tx.Bucket([]byte("widgets"))
b.Put([]byte("baz"), []byte("bat")) b.Put([]byte("foo"), []byte("bar"))
b.Delete([]byte("foo")) b.Put([]byte("baz"), []byte("bat"))
return nil b.Delete([]byte("foo"))
}) return nil
assert.NoError(t, err)
err = db.View(func(tx *Tx) error {
assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
return nil
})
assert.NoError(t, err)
}) })
ok(t, err)
err = db.View(func(tx *bolt.Tx) error {
assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
return nil
})
ok(t, err)
} }
// Ensure a closed database returns an error while running a transaction block // Ensure a closed database returns an error while running a transaction block
func TestDB_Update_Closed(t *testing.T) { func TestDB_Update_Closed(t *testing.T) {
var db DB var db bolt.DB
err := db.Update(func(tx *Tx) error { err := db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
return nil return nil
}) })
assert.Equal(t, err, ErrDatabaseNotOpen) equals(t, err, bolt.ErrDatabaseNotOpen)
} }
// Ensure a panic occurs while trying to commit a managed transaction. // Ensure a panic occurs while trying to commit a managed transaction.
func TestDB_Update_ManualCommitAndRollback(t *testing.T) { func TestDB_Update_ManualCommit(t *testing.T) {
var db DB db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets"))
assert.Panics(t, func() { tx.Commit() }) var ok bool
assert.Panics(t, func() { tx.Rollback() }) db.Update(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Commit()
}()
return nil return nil
}) })
db.View(func(tx *Tx) error { assert(t, ok, "expected panic")
assert.Panics(t, func() { tx.Commit() }) }
assert.Panics(t, func() { tx.Rollback() })
// Ensure a panic occurs while trying to rollback a managed transaction.
func TestDB_Update_ManualRollback(t *testing.T) {
db := NewTestDB()
defer db.Close()
var ok bool
db.Update(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Rollback()
}()
return nil return nil
}) })
assert(t, ok, "expected panic")
}
// Ensure a panic occurs while trying to commit a managed transaction.
func TestDB_View_ManualCommit(t *testing.T) {
db := NewTestDB()
defer db.Close()
var ok bool
db.Update(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Commit()
}()
return nil
})
assert(t, ok, "expected panic")
}
// Ensure a panic occurs while trying to rollback a managed transaction.
func TestDB_View_ManualRollback(t *testing.T) {
db := NewTestDB()
defer db.Close()
var ok bool
db.Update(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Rollback()
}()
return nil
})
assert(t, ok, "expected panic")
} }
// Ensure a write transaction that panics does not hold open locks. // Ensure a write transaction that panics does not hold open locks.
func TestDB_Update_Panic(t *testing.T) { func TestDB_Update_Panic(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
func() { defer db.Close()
defer func() {
if r := recover(); r != nil { func() {
warn("recover: update", r) defer func() {
} if r := recover(); r != nil {
}() t.Log("recover: update", r)
db.Update(func(tx *Tx) error { }
tx.CreateBucket([]byte("widgets"))
panic("omg")
})
}() }()
db.Update(func(tx *bolt.Tx) error {
// Verify we can update again. tx.CreateBucket([]byte("widgets"))
err := db.Update(func(tx *Tx) error { panic("omg")
_, err := tx.CreateBucket([]byte("widgets"))
return err
}) })
assert.NoError(t, err) }()
// Verify that our change persisted. // Verify we can update again.
err = db.Update(func(tx *Tx) error { err := db.Update(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket([]byte("widgets"))) _, err := tx.CreateBucket([]byte("widgets"))
return nil return err
}) })
ok(t, err)
// Verify that our change persisted.
err = db.Update(func(tx *bolt.Tx) error {
assert(t, tx.Bucket([]byte("widgets")) != nil, "")
return nil
}) })
} }
// Ensure a database can return an error through a read-only transactional block. // Ensure a database can return an error through a read-only transactional block.
func TestDB_View_Error(t *testing.T) { func TestDB_View_Error(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
err := db.View(func(tx *Tx) error { defer db.Close()
return errors.New("xxx") err := db.View(func(tx *bolt.Tx) error {
}) return errors.New("xxx")
assert.Equal(t, errors.New("xxx"), err)
}) })
equals(t, errors.New("xxx"), err)
} }
// Ensure a read transaction that panics does not hold open locks. // Ensure a read transaction that panics does not hold open locks.
func TestDB_View_Panic(t *testing.T) { func TestDB_View_Panic(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
return nil tx.CreateBucket([]byte("widgets"))
}) return nil
})
func() { func() {
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
warn("recover: view", r) t.Log("recover: view", r)
} }
}()
db.View(func(tx *Tx) error {
assert.NotNil(t, tx.Bucket([]byte("widgets")))
panic("omg")
})
}() }()
db.View(func(tx *bolt.Tx) error {
// Verify that we can still use read transactions. assert(t, tx.Bucket([]byte("widgets")) != nil, "")
db.View(func(tx *Tx) error { panic("omg")
assert.NotNil(t, tx.Bucket([]byte("widgets")))
return nil
}) })
}()
// Verify that we can still use read transactions.
db.View(func(tx *bolt.Tx) error {
assert(t, tx.Bucket([]byte("widgets")) != nil, "")
return nil
}) })
} }
@ -337,157 +350,85 @@ func TestDB_Commit_WriteFail(t *testing.T) {
// Ensure that DB stats can be returned. // Ensure that DB stats can be returned.
func TestDB_Stats(t *testing.T) { func TestDB_Stats(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
_, err := tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
return err _, err := tx.CreateBucket([]byte("widgets"))
}) return err
stats := db.Stats()
assert.Equal(t, 2, stats.TxStats.PageCount, "PageCount")
assert.Equal(t, 0, stats.FreePageN, "FreePageN")
assert.Equal(t, 2, stats.PendingPageN, "PendingPageN")
}) })
} stats := db.Stats()
equals(t, 2, stats.TxStats.PageCount)
// Ensure that the mmap grows appropriately. equals(t, 0, stats.FreePageN)
func TestDB_mmapSize(t *testing.T) { equals(t, 2, stats.PendingPageN)
db := &DB{pageSize: 4096}
assert.Equal(t, db.mmapSize(0), minMmapSize)
assert.Equal(t, db.mmapSize(16384), minMmapSize)
assert.Equal(t, db.mmapSize(minMmapSize-1), minMmapSize)
assert.Equal(t, db.mmapSize(minMmapSize), minMmapSize)
assert.Equal(t, db.mmapSize(minMmapSize+1), (minMmapSize*2)+4096)
assert.Equal(t, db.mmapSize(10000000), 20000768)
assert.Equal(t, db.mmapSize((1<<30)-1), 2147483648)
assert.Equal(t, db.mmapSize(1<<30), 1<<31)
} }
// Ensure that database pages are in expected order and type. // Ensure that database pages are in expected order and type.
func TestDB_Consistency(t *testing.T) { func TestDB_Consistency(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
_, err := tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
return err _, err := tx.CreateBucket([]byte("widgets"))
}) return err
})
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
db.Update(func(tx *Tx) error { db.Update(func(tx *bolt.Tx) error {
assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
return nil
})
}
db.Update(func(tx *Tx) error {
if p, _ := tx.Page(0); assert.NotNil(t, p) {
assert.Equal(t, "meta", p.Type)
}
if p, _ := tx.Page(1); assert.NotNil(t, p) {
assert.Equal(t, "meta", p.Type)
}
if p, _ := tx.Page(2); assert.NotNil(t, p) {
assert.Equal(t, "free", p.Type)
}
if p, _ := tx.Page(3); assert.NotNil(t, p) {
assert.Equal(t, "free", p.Type)
}
if p, _ := tx.Page(4); assert.NotNil(t, p) {
assert.Equal(t, "leaf", p.Type) // root leaf
}
if p, _ := tx.Page(5); assert.NotNil(t, p) {
assert.Equal(t, "freelist", p.Type)
}
p, _ := tx.Page(6)
assert.Nil(t, p)
return nil return nil
}) })
}) }
} db.Update(func(tx *bolt.Tx) error {
p, _ := tx.Page(0)
assert(t, p != nil, "")
equals(t, "meta", p.Type)
// Ensure that a database can return a string representation of itself. p, _ = tx.Page(1)
func TestDB_String(t *testing.T) { assert(t, p != nil, "")
db := &DB{path: "/foo/bar"} equals(t, "meta", p.Type)
assert.Equal(t, db.String(), `DB<"/foo/bar">`)
assert.Equal(t, db.GoString(), `bolt.DB{path:"/foo/bar"}`) p, _ = tx.Page(2)
assert(t, p != nil, "")
equals(t, "free", p.Type)
p, _ = tx.Page(3)
assert(t, p != nil, "")
equals(t, "free", p.Type)
p, _ = tx.Page(4)
assert(t, p != nil, "")
equals(t, "leaf", p.Type)
p, _ = tx.Page(5)
assert(t, p != nil, "")
equals(t, "freelist", p.Type)
p, _ = tx.Page(6)
assert(t, p == nil, "")
return nil
})
} }
// Ensure that DB stats can be substracted from one another. // Ensure that DB stats can be substracted from one another.
func TestDBStats_Sub(t *testing.T) { func TestDBStats_Sub(t *testing.T) {
var a, b Stats var a, b bolt.Stats
a.TxStats.PageCount = 3 a.TxStats.PageCount = 3
a.FreePageN = 4 a.FreePageN = 4
b.TxStats.PageCount = 10 b.TxStats.PageCount = 10
b.FreePageN = 14 b.FreePageN = 14
diff := b.Sub(&a) diff := b.Sub(&a)
assert.Equal(t, 7, diff.TxStats.PageCount) equals(t, 7, diff.TxStats.PageCount)
// free page stats are copied from the receiver and not subtracted // free page stats are copied from the receiver and not subtracted
assert.Equal(t, 14, diff.FreePageN) equals(t, 14, diff.FreePageN)
}
// Ensure that meta with bad magic is invalid.
func TestMeta_validate_magic(t *testing.T) {
m := &meta{magic: 0x01234567}
assert.Equal(t, m.validate(), ErrInvalid)
}
// Ensure that meta with a bad version is invalid.
func TestMeta_validate_version(t *testing.T) {
m := &meta{magic: magic, version: 200}
assert.Equal(t, m.validate(), ErrVersionMismatch)
}
// Ensure that a DB in strict mode will fail when corrupted.
func TestDB_StrictMode(t *testing.T) {
var msg string
func() {
defer func() {
msg = fmt.Sprintf("%s", recover())
}()
withOpenDB(func(db *DB, path string) {
db.StrictMode = true
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("foo"))
// Corrupt the DB by extending the high water mark.
tx.meta.pgid++
return nil
})
})
}()
assert.Equal(t, "check fail: page 4: unreachable unfreed", msg)
}
// Ensure that a double freeing a page will result in a panic.
func TestDB_DoubleFree(t *testing.T) {
var msg string
func() {
defer func() {
msg = fmt.Sprintf("%s", recover())
}()
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("foo"))
// Corrupt the DB by adding a page to the freelist.
db.freelist.free(0, tx.page(3))
return nil
})
})
}()
assert.Equal(t, "assertion failed: page 3 already freed", msg)
} }
func ExampleDB_Update() { func ExampleDB_Update() {
// Open the database. // Open the database.
db, _ := Open(tempfile(), 0666, nil) db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path()) defer os.Remove(db.Path())
defer db.Close() defer db.Close()
// Execute several commands within a write transaction. // Execute several commands within a write transaction.
err := db.Update(func(tx *Tx) error { err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
if err != nil { if err != nil {
return err return err
@ -500,7 +441,7 @@ func ExampleDB_Update() {
// If our transactional block didn't return an error then our data is saved. // If our transactional block didn't return an error then our data is saved.
if err == nil { if err == nil {
db.View(func(tx *Tx) error { db.View(func(tx *bolt.Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value of 'foo' is: %s\n", value) fmt.Printf("The value of 'foo' is: %s\n", value)
return nil return nil
@ -513,12 +454,12 @@ func ExampleDB_Update() {
func ExampleDB_View() { func ExampleDB_View() {
// Open the database. // Open the database.
db, _ := Open(tempfile(), 0666, nil) db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path()) defer os.Remove(db.Path())
defer db.Close() defer db.Close()
// Insert data into a bucket. // Insert data into a bucket.
db.Update(func(tx *Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("people")) tx.CreateBucket([]byte("people"))
b := tx.Bucket([]byte("people")) b := tx.Bucket([]byte("people"))
b.Put([]byte("john"), []byte("doe")) b.Put([]byte("john"), []byte("doe"))
@ -527,7 +468,7 @@ func ExampleDB_View() {
}) })
// Access data from within a read-only transactional block. // Access data from within a read-only transactional block.
db.View(func(tx *Tx) error { db.View(func(tx *bolt.Tx) error {
v := tx.Bucket([]byte("people")).Get([]byte("john")) v := tx.Bucket([]byte("people")).Get([]byte("john"))
fmt.Printf("John's last name is %s.\n", v) fmt.Printf("John's last name is %s.\n", v)
return nil return nil
@ -539,12 +480,12 @@ func ExampleDB_View() {
func ExampleDB_Begin_ReadOnly() { func ExampleDB_Begin_ReadOnly() {
// Open the database. // Open the database.
db, _ := Open(tempfile(), 0666, nil) db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path()) defer os.Remove(db.Path())
defer db.Close() defer db.Close()
// Create a bucket. // Create a bucket.
db.Update(func(tx *Tx) error { db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets")) _, err := tx.CreateBucket([]byte("widgets"))
return err return err
}) })
@ -571,44 +512,54 @@ func ExampleDB_Begin_ReadOnly() {
// zephyr likes purple // zephyr likes purple
} }
// tempfile returns a temporary file path. // TestDB represents a wrapper around a Bolt DB to handle temporary file
func tempfile() string { // creation and automatic cleanup on close.
f, _ := ioutil.TempFile("", "bolt-") type TestDB struct {
f.Close() *bolt.DB
os.Remove(f.Name())
return f.Name()
} }
// withTempPath executes a function with a database reference. // NewTestDB returns a new instance of TestDB.
func withTempPath(fn func(string)) { func NewTestDB() *TestDB {
path := tempfile() db, err := bolt.Open(tempfile(), 0666, nil)
defer os.RemoveAll(path) if err != nil {
fn(path) panic("cannot open db: " + err.Error())
}
return &TestDB{db}
} }
// withOpenDB executes a function with an already opened database. // Close closes the database and deletes the underlying file.
func withOpenDB(fn func(*DB, string)) { func (db *TestDB) Close() {
withTempPath(func(path string) { // Log statistics.
db, err := Open(path, 0666, nil) if *statsFlag {
if err != nil { db.PrintStats()
panic("cannot open db: " + err.Error()) }
}
defer db.Close()
fn(db, path)
// Log statistics. // Check database consistency after every test.
if *statsFlag { db.MustCheck()
logStats(db)
}
// Check database consistency after every test. // Close database and remove file.
mustCheck(db) defer os.Remove(db.Path())
}) db.DB.Close()
} }
// mustCheck runs a consistency check on the database and panics if any errors are found. // PrintStats prints the database stats
func mustCheck(db *DB) { func (db *TestDB) PrintStats() {
db.View(func(tx *Tx) error { var stats = db.Stats()
fmt.Printf("[db] %-20s %-20s %-20s\n",
fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
)
fmt.Printf(" %-20s %-20s %-20s\n",
fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
)
}
// MustCheck runs a consistency check on the database and panics if any errors are found.
func (db *TestDB) MustCheck() {
db.View(func(tx *bolt.Tx) error {
// Collect all the errors. // Collect all the errors.
var errors []error var errors []error
for err := range tx.Check() { for err := range tx.Check() {
@ -640,8 +591,23 @@ func mustCheck(db *DB) {
}) })
} }
// CopyTempFile copies a database to a temporary file.
func (db *TestDB) CopyTempFile() {
path := tempfile()
db.View(func(tx *bolt.Tx) error { return tx.CopyFile(path, 0600) })
fmt.Println("db copied to: ", path)
}
// tempfile returns a temporary file path.
func tempfile() string {
f, _ := ioutil.TempFile("", "bolt-")
f.Close()
os.Remove(f.Name())
return f.Name()
}
// mustContainKeys checks that a bucket contains a given set of keys. // mustContainKeys checks that a bucket contains a given set of keys.
func mustContainKeys(b *Bucket, m map[string]string) { func mustContainKeys(b *bolt.Bucket, m map[string]string) {
found := make(map[string]string) found := make(map[string]string)
b.ForEach(func(k, _ []byte) error { b.ForEach(func(k, _ []byte) error {
found[string(k)] = "" found[string(k)] = ""
@ -679,29 +645,6 @@ func trunc(b []byte, length int) []byte {
return b return b
} }
// writes the current database stats to the testing log.
func logStats(db *DB) {
var stats = db.Stats()
fmt.Printf("[db] %-20s %-20s %-20s\n",
fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
)
fmt.Printf(" %-20s %-20s %-20s\n",
fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
)
}
func truncDuration(d time.Duration) string { func truncDuration(d time.Duration) string {
return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
} }
// copyAndFailNow copies a database to a new location and then fails then test.
func copyAndFailNow(t *testing.T, db *DB) {
path := tempfile()
db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) })
fmt.Println("db copied to: ", path)
t.FailNow()
}

View File

@ -1,24 +1,27 @@
package bolt package bolt
import ( import (
"reflect"
"testing" "testing"
"unsafe" "unsafe"
"github.com/stretchr/testify/assert"
) )
// Ensure that a page is added to a transaction's freelist. // Ensure that a page is added to a transaction's freelist.
func TestFreelist_free(t *testing.T) { func TestFreelist_free(t *testing.T) {
f := newFreelist() f := newFreelist()
f.free(100, &page{id: 12}) f.free(100, &page{id: 12})
assert.Equal(t, f.pending[100], []pgid{12}) if !reflect.DeepEqual([]pgid{12}, f.pending[100]) {
t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100])
}
} }
// Ensure that a page and its overflow is added to a transaction's freelist. // Ensure that a page and its overflow is added to a transaction's freelist.
func TestFreelist_free_overflow(t *testing.T) { func TestFreelist_free_overflow(t *testing.T) {
f := newFreelist() f := newFreelist()
f.free(100, &page{id: 12, overflow: 3}) f.free(100, &page{id: 12, overflow: 3})
assert.Equal(t, f.pending[100], []pgid{12, 13, 14, 15}) if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) {
t.Fatalf("exp=%v; got=%v", exp, f.pending[100])
}
} }
// Ensure that a transaction's free pages can be released. // Ensure that a transaction's free pages can be released.
@ -29,25 +32,56 @@ func TestFreelist_release(t *testing.T) {
f.free(102, &page{id: 39}) f.free(102, &page{id: 39})
f.release(100) f.release(100)
f.release(101) f.release(101)
assert.Equal(t, []pgid{9, 12, 13}, f.ids) if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
f.release(102) f.release(102)
assert.Equal(t, []pgid{9, 12, 13, 39}, f.ids) if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
} }
// Ensure that a freelist can find contiguous blocks of pages. // Ensure that a freelist can find contiguous blocks of pages.
func TestFreelist_allocate(t *testing.T) { func TestFreelist_allocate(t *testing.T) {
f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
assert.Equal(t, 3, int(f.allocate(3))) if id := int(f.allocate(3)); id != 3 {
assert.Equal(t, 6, int(f.allocate(1))) t.Fatalf("exp=3; got=%v", id)
assert.Equal(t, 0, int(f.allocate(3))) }
assert.Equal(t, 12, int(f.allocate(2))) if id := int(f.allocate(1)); id != 6 {
assert.Equal(t, 7, int(f.allocate(1))) t.Fatalf("exp=6; got=%v", id)
assert.Equal(t, 0, int(f.allocate(0))) }
assert.Equal(t, []pgid{9, 18}, f.ids) if id := int(f.allocate(3)); id != 0 {
assert.Equal(t, 9, int(f.allocate(1))) t.Fatalf("exp=0; got=%v", id)
assert.Equal(t, 18, int(f.allocate(1))) }
assert.Equal(t, 0, int(f.allocate(1))) if id := int(f.allocate(2)); id != 12 {
assert.Equal(t, []pgid{}, f.ids) t.Fatalf("exp=12; got=%v", id)
}
if id := int(f.allocate(1)); id != 7 {
t.Fatalf("exp=7; got=%v", id)
}
if id := int(f.allocate(0)); id != 0 {
t.Fatalf("exp=0; got=%v", id)
}
if id := int(f.allocate(0)); id != 0 {
t.Fatalf("exp=0; got=%v", id)
}
if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
if id := int(f.allocate(1)); id != 9 {
t.Fatalf("exp=9; got=%v", id)
}
if id := int(f.allocate(1)); id != 18 {
t.Fatalf("exp=18; got=%v", id)
}
if id := int(f.allocate(1)); id != 0 {
t.Fatalf("exp=0; got=%v", id)
}
if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
} }
// Ensure that a freelist can deserialize from a freelist page. // Ensure that a freelist can deserialize from a freelist page.
@ -68,9 +102,9 @@ func TestFreelist_read(t *testing.T) {
f.read(page) f.read(page)
// Ensure that there are two page ids in the freelist. // Ensure that there are two page ids in the freelist.
assert.Equal(t, len(f.ids), 2) if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) {
assert.Equal(t, f.ids[0], pgid(23)) t.Fatalf("exp=%v; got=%v", exp, f.ids)
assert.Equal(t, f.ids[1], pgid(50)) }
} }
// Ensure that a freelist can serialize into a freelist page. // Ensure that a freelist can serialize into a freelist page.
@ -89,10 +123,7 @@ func TestFreelist_write(t *testing.T) {
// Ensure that the freelist is correct. // Ensure that the freelist is correct.
// All pages should be present and in reverse order. // All pages should be present and in reverse order.
assert.Equal(t, len(f2.ids), 5) if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
assert.Equal(t, f2.ids[0], pgid(3)) t.Fatalf("exp=%v; got=%v", exp, f2.ids)
assert.Equal(t, f2.ids[1], pgid(11)) }
assert.Equal(t, f2.ids[2], pgid(12))
assert.Equal(t, f2.ids[3], pgid(28))
assert.Equal(t, f2.ids[4], pgid(39))
} }

View File

@ -337,7 +337,7 @@ func (n *node) spill() error {
for _, node := range nodes { for _, node := range nodes {
// Add node's page to the freelist if it's not new. // Add node's page to the freelist if it's not new.
if node.pgid > 0 { if node.pgid > 0 {
tx.db.freelist.free(tx.id(), tx.page(node.pgid)) tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
node.pgid = 0 node.pgid = 0
} }
@ -565,7 +565,7 @@ func (n *node) dereference() {
// free adds the node's underlying page to the freelist. // free adds the node's underlying page to the freelist.
func (n *node) free() { func (n *node) free() {
if n.pgid != 0 { if n.pgid != 0 {
n.bucket.tx.db.freelist.free(n.bucket.tx.id(), n.bucket.tx.page(n.pgid)) n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
n.pgid = 0 n.pgid = 0
} }
} }

View File

@ -3,8 +3,6 @@ package bolt
import ( import (
"testing" "testing"
"unsafe" "unsafe"
"github.com/stretchr/testify/assert"
) )
// Ensure that a node can insert a key/value. // Ensure that a node can insert a key/value.
@ -14,14 +12,22 @@ func TestNode_put(t *testing.T) {
n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)
assert.Equal(t, len(n.inodes), 3)
assert.Equal(t, n.inodes[0].key, []byte("bar")) if len(n.inodes) != 3 {
assert.Equal(t, n.inodes[0].value, []byte("1")) t.Fatalf("exp=3; got=%d", len(n.inodes))
assert.Equal(t, n.inodes[1].key, []byte("baz")) }
assert.Equal(t, n.inodes[1].value, []byte("2")) if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" {
assert.Equal(t, n.inodes[2].key, []byte("foo")) t.Fatalf("exp=<bar,1>; got=<%s,%s>", k, v)
assert.Equal(t, n.inodes[2].value, []byte("3")) }
assert.Equal(t, n.inodes[2].flags, uint32(leafPageFlag)) if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" {
t.Fatalf("exp=<baz,2>; got=<%s,%s>", k, v)
}
if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" {
t.Fatalf("exp=<foo,3>; got=<%s,%s>", k, v)
}
if n.inodes[2].flags != uint32(leafPageFlag) {
t.Fatalf("not a leaf: %d", n.inodes[2].flags)
}
} }
// Ensure that a node can deserialize from a leaf page. // Ensure that a node can deserialize from a leaf page.
@ -47,12 +53,18 @@ func TestNode_read_LeafPage(t *testing.T) {
n.read(page) n.read(page)
// Check that there are two inodes with correct data. // Check that there are two inodes with correct data.
assert.True(t, n.isLeaf) if !n.isLeaf {
assert.Equal(t, len(n.inodes), 2) t.Fatal("expected leaf")
assert.Equal(t, n.inodes[0].key, []byte("bar")) }
assert.Equal(t, n.inodes[0].value, []byte("fooz")) if len(n.inodes) != 2 {
assert.Equal(t, n.inodes[1].key, []byte("helloworld")) t.Fatalf("exp=2; got=%d", len(n.inodes))
assert.Equal(t, n.inodes[1].value, []byte("bye")) }
if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" {
t.Fatalf("exp=<bar,fooz>; got=<%s,%s>", k, v)
}
if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" {
t.Fatalf("exp=<helloworld,bye>; got=<%s,%s>", k, v)
}
} }
// Ensure that a node can serialize into a leaf page. // Ensure that a node can serialize into a leaf page.
@ -73,13 +85,18 @@ func TestNode_write_LeafPage(t *testing.T) {
n2.read(p) n2.read(p)
// Check that the two pages are the same. // Check that the two pages are the same.
assert.Equal(t, len(n2.inodes), 3) if len(n2.inodes) != 3 {
assert.Equal(t, n2.inodes[0].key, []byte("john")) t.Fatalf("exp=3; got=%d", len(n2.inodes))
assert.Equal(t, n2.inodes[0].value, []byte("johnson")) }
assert.Equal(t, n2.inodes[1].key, []byte("ricki")) if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" {
assert.Equal(t, n2.inodes[1].value, []byte("lake")) t.Fatalf("exp=<john,johnson>; got=<%s,%s>", k, v)
assert.Equal(t, n2.inodes[2].key, []byte("susy")) }
assert.Equal(t, n2.inodes[2].value, []byte("que")) if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" {
t.Fatalf("exp=<ricki,lake>; got=<%s,%s>", k, v)
}
if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" {
t.Fatalf("exp=<susy,que>; got=<%s,%s>", k, v)
}
} }
// Ensure that a node can split into appropriate subgroups. // Ensure that a node can split into appropriate subgroups.
@ -96,9 +113,15 @@ func TestNode_split(t *testing.T) {
n.split(100) n.split(100)
var parent = n.parent var parent = n.parent
assert.Equal(t, len(parent.children), 2) if len(parent.children) != 2 {
assert.Equal(t, len(parent.children[0].inodes), 2) t.Fatalf("exp=2; got=%d", len(parent.children))
assert.Equal(t, len(parent.children[1].inodes), 3) }
if len(parent.children[0].inodes) != 2 {
t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes))
}
if len(parent.children[1].inodes) != 3 {
t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes))
}
} }
// Ensure that a page with the minimum number of inodes just returns a single node. // Ensure that a page with the minimum number of inodes just returns a single node.
@ -110,7 +133,9 @@ func TestNode_split_MinKeys(t *testing.T) {
// Split. // Split.
n.split(20) n.split(20)
assert.Nil(t, n.parent) if n.parent != nil {
t.Fatalf("expected nil parent")
}
} }
// Ensure that a node that has keys that all fit on a page just returns one leaf. // Ensure that a node that has keys that all fit on a page just returns one leaf.
@ -125,5 +150,7 @@ func TestNode_split_SinglePage(t *testing.T) {
// Split. // Split.
n.split(4096) n.split(4096)
assert.Nil(t, n.parent) if n.parent != nil {
t.Fatalf("expected nil parent")
}
} }

View File

@ -1,17 +1,26 @@
package bolt package bolt
import ( import (
"github.com/stretchr/testify/assert"
"testing" "testing"
) )
// Ensure that the page type can be returned in human readable format. // Ensure that the page type can be returned in human readable format.
func TestPage_typ(t *testing.T) { func TestPage_typ(t *testing.T) {
assert.Equal(t, (&page{flags: branchPageFlag}).typ(), "branch") if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" {
assert.Equal(t, (&page{flags: leafPageFlag}).typ(), "leaf") t.Fatalf("exp=branch; got=%v", typ)
assert.Equal(t, (&page{flags: metaPageFlag}).typ(), "meta") }
assert.Equal(t, (&page{flags: freelistPageFlag}).typ(), "freelist") if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" {
assert.Equal(t, (&page{flags: 20000}).typ(), "unknown<4e20>") t.Fatalf("exp=leaf; got=%v", typ)
}
if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" {
t.Fatalf("exp=meta; got=%v", typ)
}
if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" {
t.Fatalf("exp=freelist; got=%v", typ)
}
if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" {
t.Fatalf("exp=unknown<4e20>; got=%v", typ)
}
} }
// Ensure that the hexdump debugging function doesn't blow up. // Ensure that the hexdump debugging function doesn't blow up.

View File

@ -1,9 +1,11 @@
package bolt package bolt_test
import ( import (
"bytes" "bytes"
"flag" "flag"
"fmt"
"math/rand" "math/rand"
"os"
"reflect" "reflect"
"testing/quick" "testing/quick"
"time" "time"
@ -28,8 +30,8 @@ func init() {
flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "") flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "")
flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "") flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "")
flag.Parse() flag.Parse()
warn("seed:", qseed) fmt.Fprintln(os.Stderr, "seed:", qseed)
warnf("quick settings: count=%v, items=%v, ksize=%v, vsize=%v", qcount, qmaxitems, qmaxksize, qmaxvsize) fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize)
} }
func qconfig() *quick.Config { func qconfig() *quick.Config {

View File

@ -1,4 +1,4 @@
package bolt package bolt_test
import ( import (
"bytes" "bytes"
@ -7,7 +7,7 @@ import (
"sync" "sync"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/boltdb/bolt"
) )
func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) } func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) }
@ -39,86 +39,88 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
var readerHandlers = []simulateHandler{simulateGetHandler} var readerHandlers = []simulateHandler{simulateGetHandler}
var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler}
var versions = make(map[txid]*QuickDB) var versions = make(map[int]*QuickDB)
versions[1] = NewQuickDB() versions[1] = NewQuickDB()
withOpenDB(func(db *DB, path string) {
var mutex sync.Mutex
// Run n threads in parallel, each with their own operation. db := NewTestDB()
var wg sync.WaitGroup defer db.Close()
var threads = make(chan bool, parallelism)
var i int
for {
threads <- true
wg.Add(1)
writable := ((rand.Int() % 100) < 20) // 20% writers
// Choose an operation to execute. var mutex sync.Mutex
var handler simulateHandler
if writable {
handler = writerHandlers[rand.Intn(len(writerHandlers))]
} else {
handler = readerHandlers[rand.Intn(len(readerHandlers))]
}
// Execute a thread for the given operation. // Run n threads in parallel, each with their own operation.
go func(writable bool, handler simulateHandler) { var wg sync.WaitGroup
defer wg.Done() var threads = make(chan bool, parallelism)
var i int
for {
threads <- true
wg.Add(1)
writable := ((rand.Int() % 100) < 20) // 20% writers
// Start transaction. // Choose an operation to execute.
tx, err := db.Begin(writable) var handler simulateHandler
if err != nil { if writable {
t.Fatal("tx begin: ", err) handler = writerHandlers[rand.Intn(len(writerHandlers))]
} } else {
handler = readerHandlers[rand.Intn(len(readerHandlers))]
// Obtain current state of the dataset.
mutex.Lock()
var qdb = versions[tx.id()]
if writable {
qdb = versions[tx.id()-1].Copy()
}
mutex.Unlock()
// Make sure we commit/rollback the tx at the end and update the state.
if writable {
defer func() {
mutex.Lock()
versions[tx.id()] = qdb
mutex.Unlock()
assert.NoError(t, tx.Commit())
}()
} else {
defer tx.Rollback()
}
// Ignore operation if we don't have data yet.
if qdb == nil {
return
}
// Execute handler.
handler(tx, qdb)
// Release a thread back to the scheduling loop.
<-threads
}(writable, handler)
i++
if i > threadCount {
break
}
} }
// Wait until all threads are done. // Execute a thread for the given operation.
wg.Wait() go func(writable bool, handler simulateHandler) {
}) defer wg.Done()
// Start transaction.
tx, err := db.Begin(writable)
if err != nil {
t.Fatal("tx begin: ", err)
}
// Obtain current state of the dataset.
mutex.Lock()
var qdb = versions[tx.ID()]
if writable {
qdb = versions[tx.ID()-1].Copy()
}
mutex.Unlock()
// Make sure we commit/rollback the tx at the end and update the state.
if writable {
defer func() {
mutex.Lock()
versions[tx.ID()] = qdb
mutex.Unlock()
ok(t, tx.Commit())
}()
} else {
defer tx.Rollback()
}
// Ignore operation if we don't have data yet.
if qdb == nil {
return
}
// Execute handler.
handler(tx, qdb)
// Release a thread back to the scheduling loop.
<-threads
}(writable, handler)
i++
if i > threadCount {
break
}
}
// Wait until all threads are done.
wg.Wait()
} }
type simulateHandler func(tx *Tx, qdb *QuickDB) type simulateHandler func(tx *bolt.Tx, qdb *QuickDB)
// Retrieves a key from the database and verifies that it is what is expected. // Retrieves a key from the database and verifies that it is what is expected.
func simulateGetHandler(tx *Tx, qdb *QuickDB) { func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) {
// Randomly retrieve an existing exist. // Randomly retrieve an existing exist.
keys := qdb.Rand() keys := qdb.Rand()
if len(keys) == 0 { if len(keys) == 0 {
@ -153,7 +155,7 @@ func simulateGetHandler(tx *Tx, qdb *QuickDB) {
} }
// Inserts a key into the database. // Inserts a key into the database.
func simulatePutHandler(tx *Tx, qdb *QuickDB) { func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) {
var err error var err error
keys, value := randKeys(), randValue() keys, value := randKeys(), randValue()

10
tx.go
View File

@ -52,9 +52,9 @@ func (tx *Tx) init(db *DB) {
} }
} }
// id returns the transaction id. // ID returns the transaction id.
func (tx *Tx) id() txid { func (tx *Tx) ID() int {
return tx.meta.txid return int(tx.meta.txid)
} }
// DB returns a reference to the database that created the transaction. // DB returns a reference to the database that created the transaction.
@ -158,7 +158,7 @@ func (tx *Tx) Commit() error {
// Free the freelist and allocate new pages for it. This will overestimate // Free the freelist and allocate new pages for it. This will overestimate
// the size of the freelist but not underestimate the size (which would be bad). // the size of the freelist but not underestimate the size (which would be bad).
tx.db.freelist.free(tx.id(), tx.db.page(tx.meta.freelist)) tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
if err != nil { if err != nil {
tx.rollback() tx.rollback()
@ -218,7 +218,7 @@ func (tx *Tx) rollback() {
return return
} }
if tx.writable { if tx.writable {
tx.db.freelist.rollback(tx.id()) tx.db.freelist.rollback(tx.meta.txid)
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
} }
tx.close() tx.close()

View File

@ -1,4 +1,4 @@
package bolt package bolt_test
import ( import (
"errors" "errors"
@ -6,310 +6,302 @@ import (
"os" "os"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/boltdb/bolt"
) )
// Ensure that committing a closed transaction returns an error. // Ensure that committing a closed transaction returns an error.
func TestTx_Commit_Closed(t *testing.T) { func TestTx_Commit_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
tx, _ := db.Begin(true) defer db.Close()
tx.CreateBucket([]byte("foo")) tx, _ := db.Begin(true)
assert.NoError(t, tx.Commit()) tx.CreateBucket([]byte("foo"))
assert.Equal(t, tx.Commit(), ErrTxClosed) ok(t, tx.Commit())
}) equals(t, tx.Commit(), bolt.ErrTxClosed)
} }
// Ensure that rolling back a closed transaction returns an error. // Ensure that rolling back a closed transaction returns an error.
func TestTx_Rollback_Closed(t *testing.T) { func TestTx_Rollback_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
tx, _ := db.Begin(true) defer db.Close()
assert.NoError(t, tx.Rollback()) tx, _ := db.Begin(true)
assert.Equal(t, tx.Rollback(), ErrTxClosed) ok(t, tx.Rollback())
}) equals(t, tx.Rollback(), bolt.ErrTxClosed)
} }
// Ensure that committing a read-only transaction returns an error. // Ensure that committing a read-only transaction returns an error.
func TestTx_Commit_ReadOnly(t *testing.T) { func TestTx_Commit_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
tx, _ := db.Begin(false) defer db.Close()
assert.Equal(t, tx.Commit(), ErrTxNotWritable) tx, _ := db.Begin(false)
}) equals(t, tx.Commit(), bolt.ErrTxNotWritable)
} }
// Ensure that a transaction can retrieve a cursor on the root bucket. // Ensure that a transaction can retrieve a cursor on the root bucket.
func TestTx_Cursor(t *testing.T) { func TestTx_Cursor(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("woojits")) tx.CreateBucket([]byte("widgets"))
c := tx.Cursor() tx.CreateBucket([]byte("woojits"))
c := tx.Cursor()
k, v := c.First() k, v := c.First()
assert.Equal(t, "widgets", string(k)) equals(t, "widgets", string(k))
assert.Nil(t, v) assert(t, v == nil, "")
k, v = c.Next() k, v = c.Next()
assert.Equal(t, "woojits", string(k)) equals(t, "woojits", string(k))
assert.Nil(t, v) assert(t, v == nil, "")
k, v = c.Next() k, v = c.Next()
assert.Nil(t, k) assert(t, k == nil, "")
assert.Nil(t, v) assert(t, v == nil, "")
return nil return nil
})
}) })
} }
// Ensure that creating a bucket with a read-only transaction returns an error. // Ensure that creating a bucket with a read-only transaction returns an error.
func TestTx_CreateBucket_ReadOnly(t *testing.T) { func TestTx_CreateBucket_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.View(func(tx *Tx) error { defer db.Close()
b, err := tx.CreateBucket([]byte("foo")) db.View(func(tx *bolt.Tx) error {
assert.Nil(t, b) b, err := tx.CreateBucket([]byte("foo"))
assert.Equal(t, ErrTxNotWritable, err) assert(t, b == nil, "")
return nil equals(t, bolt.ErrTxNotWritable, err)
}) return nil
}) })
} }
// Ensure that creating a bucket on a closed transaction returns an error. // Ensure that creating a bucket on a closed transaction returns an error.
func TestTx_CreateBucket_Closed(t *testing.T) { func TestTx_CreateBucket_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
tx, _ := db.Begin(true) defer db.Close()
tx.Commit() tx, _ := db.Begin(true)
b, err := tx.CreateBucket([]byte("foo")) tx.Commit()
assert.Nil(t, b) b, err := tx.CreateBucket([]byte("foo"))
assert.Equal(t, ErrTxClosed, err) assert(t, b == nil, "")
}) equals(t, bolt.ErrTxClosed, err)
} }
// Ensure that a Tx can retrieve a bucket. // Ensure that a Tx can retrieve a bucket.
func TestTx_Bucket(t *testing.T) { func TestTx_Bucket(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
assert.NotNil(t, b) b := tx.Bucket([]byte("widgets"))
return nil assert(t, b != nil, "")
}) return nil
}) })
} }
// Ensure that a Tx retrieving a non-existent key returns nil. // Ensure that a Tx retrieving a non-existent key returns nil.
func TestTx_Get_Missing(t *testing.T) { func TestTx_Get_Missing(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) tx.CreateBucket([]byte("widgets"))
value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key")) tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
assert.Nil(t, value) value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
return nil assert(t, value == nil, "")
}) return nil
}) })
} }
// Ensure that a bucket can be created and retrieved. // Ensure that a bucket can be created and retrieved.
func TestTx_CreateBucket(t *testing.T) { func TestTx_CreateBucket(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
// Create a bucket. defer db.Close()
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
assert.NotNil(t, b)
assert.NoError(t, err)
return nil
})
// Read the bucket through a separate transaction. // Create a bucket.
db.View(func(tx *Tx) error { db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
return nil ok(t, err)
}) return nil
})
// Read the bucket through a separate transaction.
db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
assert(t, b != nil, "")
return nil
}) })
} }
// Ensure that a bucket can be created if it doesn't already exist. // Ensure that a bucket can be created if it doesn't already exist.
func TestTx_CreateBucketIfNotExists(t *testing.T) { func TestTx_CreateBucketIfNotExists(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
b, err := tx.CreateBucketIfNotExists([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
assert.NotNil(t, b) b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
assert.NoError(t, err) assert(t, b != nil, "")
ok(t, err)
b, err = tx.CreateBucketIfNotExists([]byte("widgets")) b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
assert.NoError(t, err) ok(t, err)
b, err = tx.CreateBucketIfNotExists([]byte{}) b, err = tx.CreateBucketIfNotExists([]byte{})
assert.Nil(t, b) assert(t, b == nil, "")
assert.Equal(t, ErrBucketNameRequired, err) equals(t, bolt.ErrBucketNameRequired, err)
b, err = tx.CreateBucketIfNotExists(nil) b, err = tx.CreateBucketIfNotExists(nil)
assert.Nil(t, b) assert(t, b == nil, "")
assert.Equal(t, ErrBucketNameRequired, err) equals(t, bolt.ErrBucketNameRequired, err)
return nil return nil
}) })
// Read the bucket through a separate transaction. // Read the bucket through a separate transaction.
db.View(func(tx *Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
return nil return nil
})
}) })
} }
// Ensure that a bucket cannot be created twice. // Ensure that a bucket cannot be created twice.
func TestTx_CreateBucket_Exists(t *testing.T) { func TestTx_CreateBucket_Exists(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
// Create a bucket. defer db.Close()
db.Update(func(tx *Tx) error { // Create a bucket.
b, err := tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
assert.NotNil(t, b) b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) assert(t, b != nil, "")
return nil ok(t, err)
}) return nil
})
// Create the same bucket again. // Create the same bucket again.
db.Update(func(tx *Tx) error { db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
assert.Nil(t, b) assert(t, b == nil, "")
assert.Equal(t, ErrBucketExists, err) equals(t, bolt.ErrBucketExists, err)
return nil return nil
})
}) })
} }
// Ensure that a bucket is created with a non-blank name. // Ensure that a bucket is created with a non-blank name.
func TestTx_CreateBucket_NameRequired(t *testing.T) { func TestTx_CreateBucket_NameRequired(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
b, err := tx.CreateBucket(nil) db.Update(func(tx *bolt.Tx) error {
assert.Nil(t, b) b, err := tx.CreateBucket(nil)
assert.Equal(t, ErrBucketNameRequired, err) assert(t, b == nil, "")
return nil equals(t, bolt.ErrBucketNameRequired, err)
}) return nil
}) })
} }
// Ensure that a bucket can be deleted. // Ensure that a bucket can be deleted.
func TestTx_DeleteBucket(t *testing.T) { func TestTx_DeleteBucket(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
// Create a bucket and add a value. defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
return nil
})
// Save root page id. // Create a bucket and add a value.
var root pgid db.Update(func(tx *bolt.Tx) error {
db.View(func(tx *Tx) error { tx.CreateBucket([]byte("widgets"))
root = tx.Bucket([]byte("widgets")).root tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
return nil return nil
}) })
// Delete the bucket and make sure we can't get the value. // Delete the bucket and make sure we can't get the value.
db.Update(func(tx *Tx) error { db.Update(func(tx *bolt.Tx) error {
assert.NoError(t, tx.DeleteBucket([]byte("widgets"))) ok(t, tx.DeleteBucket([]byte("widgets")))
assert.Nil(t, tx.Bucket([]byte("widgets"))) assert(t, tx.Bucket([]byte("widgets")) == nil, "")
return nil return nil
}) })
db.Update(func(tx *Tx) error { db.Update(func(tx *bolt.Tx) error {
// Verify that the bucket's page is free. // Create the bucket again and make sure there's not a phantom value.
assert.Equal(t, []pgid{4, 5}, db.freelist.all()) b, err := tx.CreateBucket([]byte("widgets"))
assert(t, b != nil, "")
// Create the bucket again and make sure there's not a phantom value. ok(t, err)
b, err := tx.CreateBucket([]byte("widgets")) assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
assert.NotNil(t, b) return nil
assert.NoError(t, err)
assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
return nil
})
}) })
} }
// Ensure that deleting a bucket on a closed transaction returns an error. // Ensure that deleting a bucket on a closed transaction returns an error.
func TestTx_DeleteBucket_Closed(t *testing.T) { func TestTx_DeleteBucket_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
tx, _ := db.Begin(true) defer db.Close()
tx.Commit() tx, _ := db.Begin(true)
assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxClosed) tx.Commit()
}) equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed)
} }
// Ensure that deleting a bucket with a read-only transaction returns an error. // Ensure that deleting a bucket with a read-only transaction returns an error.
func TestTx_DeleteBucket_ReadOnly(t *testing.T) { func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.View(func(tx *Tx) error { defer db.Close()
assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxNotWritable) db.View(func(tx *bolt.Tx) error {
return nil equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable)
}) return nil
}) })
} }
// Ensure that nothing happens when deleting a bucket that doesn't exist. // Ensure that nothing happens when deleting a bucket that doesn't exist.
func TestTx_DeleteBucket_NotFound(t *testing.T) { func TestTx_DeleteBucket_NotFound(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket([]byte("widgets"))) db.Update(func(tx *bolt.Tx) error {
return nil equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
}) return nil
}) })
} }
// Ensure that Tx commit handlers are called after a transaction successfully commits. // Ensure that Tx commit handlers are called after a transaction successfully commits.
func TestTx_OnCommit(t *testing.T) { func TestTx_OnCommit(t *testing.T) {
var x int var x int
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.OnCommit(func() { x += 1 }) db.Update(func(tx *bolt.Tx) error {
tx.OnCommit(func() { x += 2 }) tx.OnCommit(func() { x += 1 })
_, err := tx.CreateBucket([]byte("widgets")) tx.OnCommit(func() { x += 2 })
return err _, err := tx.CreateBucket([]byte("widgets"))
}) return err
}) })
assert.Equal(t, 3, x) equals(t, 3, x)
} }
// Ensure that Tx commit handlers are NOT called after a transaction rolls back. // Ensure that Tx commit handlers are NOT called after a transaction rolls back.
func TestTx_OnCommit_Rollback(t *testing.T) { func TestTx_OnCommit_Rollback(t *testing.T) {
var x int var x int
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.OnCommit(func() { x += 1 }) db.Update(func(tx *bolt.Tx) error {
tx.OnCommit(func() { x += 2 }) tx.OnCommit(func() { x += 1 })
tx.CreateBucket([]byte("widgets")) tx.OnCommit(func() { x += 2 })
return errors.New("rollback this commit") tx.CreateBucket([]byte("widgets"))
}) return errors.New("rollback this commit")
}) })
assert.Equal(t, 0, x) equals(t, 0, x)
} }
// Ensure that the database can be copied to a file path. // Ensure that the database can be copied to a file path.
func TestTx_CopyFile(t *testing.T) { func TestTx_CopyFile(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
var dest = tempfile() defer db.Close()
db.Update(func(tx *Tx) error { var dest = tempfile()
tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
return nil tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
}) return nil
})
assert.NoError(t, db.View(func(tx *Tx) error { return tx.CopyFile(dest, 0600) })) ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) }))
db2, err := Open(dest, 0600, nil) db2, err := bolt.Open(dest, 0600, nil)
assert.NoError(t, err) ok(t, err)
defer db2.Close() defer db2.Close()
db2.View(func(tx *Tx) error { db2.View(func(tx *bolt.Tx) error {
assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo"))) equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
return nil return nil
})
}) })
} }
@ -336,48 +328,48 @@ func (f *failWriter) Write(p []byte) (n int, err error) {
// Ensure that Copy handles write errors right. // Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Meta(t *testing.T) { func TestTx_CopyFile_Error_Meta(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
return nil tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
}) return nil
err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{}) })
assert.EqualError(t, err, "meta copy: error injected for tests")
}) })
err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) })
equals(t, err.Error(), "meta copy: error injected for tests")
} }
// Ensure that Copy handles write errors right. // Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Normal(t *testing.T) { func TestTx_CopyFile_Error_Normal(t *testing.T) {
withOpenDB(func(db *DB, path string) { db := NewTestDB()
db.Update(func(tx *Tx) error { defer db.Close()
tx.CreateBucket([]byte("widgets")) db.Update(func(tx *bolt.Tx) error {
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
return nil tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
}) return nil
err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{3 * db.pageSize}) })
assert.EqualError(t, err, "error injected for tests")
}) })
err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) })
equals(t, err.Error(), "error injected for tests")
} }
func ExampleTx_Rollback() { func ExampleTx_Rollback() {
// Open the database. // Open the database.
db, _ := Open(tempfile(), 0666, nil) db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path()) defer os.Remove(db.Path())
defer db.Close() defer db.Close()
// Create a bucket. // Create a bucket.
db.Update(func(tx *Tx) error { db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets")) _, err := tx.CreateBucket([]byte("widgets"))
return err return err
}) })
// Set a value for a key. // Set a value for a key.
db.Update(func(tx *Tx) error { db.Update(func(tx *bolt.Tx) error {
return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
}) })
@ -388,7 +380,7 @@ func ExampleTx_Rollback() {
tx.Rollback() tx.Rollback()
// Ensure that our original value is still set. // Ensure that our original value is still set.
db.View(func(tx *Tx) error { db.View(func(tx *bolt.Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value for 'foo' is still: %s\n", value) fmt.Printf("The value for 'foo' is still: %s\n", value)
return nil return nil
@ -400,12 +392,12 @@ func ExampleTx_Rollback() {
func ExampleTx_CopyFile() { func ExampleTx_CopyFile() {
// Open the database. // Open the database.
db, _ := Open(tempfile(), 0666, nil) db, _ := bolt.Open(tempfile(), 0666, nil)
defer os.Remove(db.Path()) defer os.Remove(db.Path())
defer db.Close() defer db.Close()
// Create a bucket and a key. // Create a bucket and a key.
db.Update(func(tx *Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
return nil return nil
@ -413,15 +405,15 @@ func ExampleTx_CopyFile() {
// Copy the database to another file. // Copy the database to another file.
toFile := tempfile() toFile := tempfile()
db.View(func(tx *Tx) error { return tx.CopyFile(toFile, 0666) }) db.View(func(tx *bolt.Tx) error { return tx.CopyFile(toFile, 0666) })
defer os.Remove(toFile) defer os.Remove(toFile)
// Open the cloned database. // Open the cloned database.
db2, _ := Open(toFile, 0666, nil) db2, _ := bolt.Open(toFile, 0666, nil)
defer db2.Close() defer db2.Close()
// Ensure that the key exists in the copy. // Ensure that the key exists in the copy.
db2.View(func(tx *Tx) error { db2.View(func(tx *bolt.Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value for 'foo' in the clone is: %s\n", value) fmt.Printf("The value for 'foo' in the clone is: %s\n", value)
return nil return nil