Mirror of https://github.com/etcd-io/bbolt.git (synced 2025-05-31 11:42:30 +00:00)
Commit defbfd35af

bolt_test.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package bolt_test

import (
	"fmt"
	"path/filepath"
	"reflect"
	"runtime"
	"testing"
)

// assert fails the test if the condition is false.
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
	if !condition {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
		tb.FailNow()
	}
}

// ok fails the test if an err is not nil.
func ok(tb testing.TB, err error) {
	if err != nil {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
		tb.FailNow()
	}
}

// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
	if !reflect.DeepEqual(exp, act) {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
		tb.FailNow()
	}
}
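The three helpers above are the commit's replacement for testify. A hypothetical test showing how they compose; it assumes the NewTestDB wrapper that the rewritten tests later in this diff rely on, and the bucket and key names are only illustrative:

	func TestPutGet_Example(t *testing.T) {
		db := NewTestDB() // wrapper around *bolt.DB used throughout the rewritten tests
		defer db.Close()

		// ok fails the test immediately on any unexpected error.
		ok(t, db.Update(func(tx *bolt.Tx) error {
			b, err := tx.CreateBucket([]byte("widgets"))
			ok(t, err)
			return b.Put([]byte("foo"), []byte("bar"))
		}))

		db.View(func(tx *bolt.Tx) error {
			v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
			// assert takes an arbitrary condition plus a printf-style message.
			assert(t, v != nil, "missing value for %q", "foo")
			// equals compares with reflect.DeepEqual and prints exp/got on failure.
			equals(t, []byte("bar"), v)
			return nil
		})
	}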
@@ -634,7 +634,7 @@ func (b *Bucket) free() {
var tx = b.tx
b.forEachPageNode(func(p *page, n *node, _ int) {
if p != nil {
tx.db.freelist.free(tx.id(), p)
tx.db.freelist.free(tx.meta.txid, p)
} else {
n.free()
}
bucket_test.go (1446 lines changed; file diff suppressed because it is too large)
@@ -283,7 +283,7 @@ func benchStartProfiling(options *BenchOptions) {
if options.CPUProfile != "" {
cpuprofile, err = os.Create(options.CPUProfile)
if err != nil {
fatal("bench: could not create cpu profile %q: %v", options.CPUProfile, err)
fatalf("bench: could not create cpu profile %q: %v", options.CPUProfile, err)
}
pprof.StartCPUProfile(cpuprofile)
}
@@ -292,7 +292,7 @@ func benchStartProfiling(options *BenchOptions) {
if options.MemProfile != "" {
memprofile, err = os.Create(options.MemProfile)
if err != nil {
fatal("bench: could not create memory profile %q: %v", options.MemProfile, err)
fatalf("bench: could not create memory profile %q: %v", options.MemProfile, err)
}
runtime.MemProfileRate = 4096
}
@@ -301,7 +301,7 @@ func benchStartProfiling(options *BenchOptions) {
if options.BlockProfile != "" {
blockprofile, err = os.Create(options.BlockProfile)
if err != nil {
fatal("bench: could not create block profile %q: %v", options.BlockProfile, err)
fatalf("bench: could not create block profile %q: %v", options.BlockProfile, err)
}
runtime.SetBlockProfileRate(1)
}
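The fatal to fatalf change matters because each of these call sites passes a printf-style format string. The two helpers are not part of this diff; a plausible sketch of the distinction, under that assumption (package name and bodies are hypothetical):

	package cli

	import (
		"fmt"
		"os"
	)

	// fatal prints its arguments like fmt.Println and exits; a format string
	// passed to it would be printed literally, verbs and all.
	func fatal(v ...interface{}) {
		fmt.Fprintln(os.Stderr, v...)
		os.Exit(1)
	}

	// fatalf interpolates the arguments into the format string before exiting,
	// which is what the call sites above expect.
	func fatalf(format string, v ...interface{}) {
		fmt.Fprintf(os.Stderr, format+"\n", v...)
		os.Exit(1)
	}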
@@ -5,7 +5,6 @@ import (

"github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
)

// Ensure that a list of buckets can be retrieved.
@@ -20,7 +19,7 @@ func TestBuckets(t *testing.T) {
})
db.Close()
output := run("buckets", path)
assert.Equal(t, "whatchits\nwidgets\nwoojits", output)
equals(t, "whatchits\nwidgets\nwoojits", output)
})
}

@@ -28,5 +27,5 @@ func TestBuckets(t *testing.T) {
func TestBucketsDBNotFound(t *testing.T) {
SetTestMode(true)
output := run("buckets", "no/such/db")
assert.Equal(t, "stat no/such/db: no such file or directory", output)
equals(t, "stat no/such/db: no such file or directory", output)
}
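These CLI tests dot-import the command package, so exported identifiers such as SetTestMode can be used without a package qualifier while the tests still live in a separate package. A minimal illustration of the pattern (TestExample is hypothetical):

	package main_test

	import (
		"testing"

		. "github.com/boltdb/bolt/cmd/bolt" // exported names become directly visible
	)

	func TestExample(t *testing.T) {
		SetTestMode(true) // resolves to cmd/bolt.SetTestMode via the dot import
	}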
@@ -42,7 +42,7 @@ func Export(path string) {
// Encode all buckets into JSON.
output, err := json.Marshal(root)
if err != nil {
return fmt.Errorf("encode: ", err)
return fmt.Errorf("encode: %s", err)
}
print(string(output))
return nil
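The one-line fix here is about the missing format verb: the original call hands err to fmt.Errorf but gives it nothing to format, so the argument is reported as an extra operand instead of being interpolated (go vet flags exactly this pattern). A small standalone illustration, with the output shown approximately in the comments:

	package main

	import (
		"errors"
		"fmt"
	)

	func main() {
		err := errors.New("boom")
		fmt.Println(fmt.Errorf("encode: ", err))   // encode: %!(EXTRA *errors.errorString=boom)
		fmt.Println(fmt.Errorf("encode: %s", err)) // encode: boom
	}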
@@ -5,7 +5,6 @@ import (

"github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
)

// Ensure that a database can be exported.
@@ -32,7 +31,7 @@ func TestExport(t *testing.T) {
})
db.Close()
output := run("export", path)
assert.Equal(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output)
equals(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output)
})
}

@@ -40,5 +39,5 @@ func TestExport(t *testing.T) {
func TestExport_NotFound(t *testing.T) {
SetTestMode(true)
output := run("export", "no/such/db")
assert.Equal(t, "stat no/such/db: no such file or directory", output)
equals(t, "stat no/such/db: no such file or directory", output)
}
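The long expected string in TestExport is the export format itself: a JSON array of nested bucket entries whose keys and values are standard base64. For instance "d2lkZ2V0cw==" decodes to "widgets" and "MDAwMA==" to "0000". A tiny standalone decoder for spot-checking such fixtures:

	package main

	import (
		"encoding/base64"
		"fmt"
	)

	func main() {
		for _, s := range []string{"d2lkZ2V0cw==", "Zm9v", "MDAwMA=="} {
			b, err := base64.StdEncoding.DecodeString(s)
			if err != nil {
				panic(err)
			}
			fmt.Printf("%s -> %q\n", s, b) // widgets, foo, 0000
		}
	}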
@@ -5,7 +5,6 @@ import (

"github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
)

// Ensure that a value can be retrieved from the CLI.
@@ -19,7 +18,7 @@ func TestGet(t *testing.T) {
})
db.Close()
output := run("get", path, "widgets", "foo")
assert.Equal(t, "bar", output)
equals(t, "bar", output)
})
}

@@ -27,7 +26,7 @@ func TestGet(t *testing.T) {
func TestGetDBNotFound(t *testing.T) {
SetTestMode(true)
output := run("get", "no/such/db", "widgets", "foo")
assert.Equal(t, "stat no/such/db: no such file or directory", output)
equals(t, "stat no/such/db: no such file or directory", output)
}

// Ensure that an error is reported if the bucket is not found.
@@ -36,7 +35,7 @@ func TestGetBucketNotFound(t *testing.T) {
open(func(db *bolt.DB, path string) {
db.Close()
output := run("get", path, "widgets", "foo")
assert.Equal(t, "bucket not found: widgets", output)
equals(t, "bucket not found: widgets", output)
})
}

@@ -50,6 +49,6 @@ func TestGetKeyNotFound(t *testing.T) {
})
db.Close()
output := run("get", path, "widgets", "foo")
assert.Equal(t, "key not found: foo", output)
equals(t, "key not found: foo", output)
})
}
@@ -6,7 +6,6 @@ import (

"github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
)

// Ensure that a database can be imported.
@@ -15,32 +14,30 @@ func TestImport(t *testing.T) {

// Write input file.
input := tempfile()
assert.NoError(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600))
ok(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600))

// Import database.
path := tempfile()
output := run("import", path, "--input", input)
assert.Equal(t, ``, output)
equals(t, ``, output)

// Open database and verify contents.
db, err := bolt.Open(path, 0600, nil)
assert.NoError(t, err)
ok(t, err)
db.View(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket([]byte("empty")))
assert(t, tx.Bucket([]byte("empty")) != nil, "")

b := tx.Bucket([]byte("widgets"))
if assert.NotNil(t, b) {
assert.Equal(t, []byte("0000"), b.Get([]byte("foo")))
assert.Equal(t, []byte(""), b.Get([]byte("bar")))
}
assert(t, b != nil, "")
equals(t, []byte("0000"), b.Get([]byte("foo")))
equals(t, []byte(""), b.Get([]byte("bar")))

b = tx.Bucket([]byte("woojits"))
if assert.NotNil(t, b) {
assert.Equal(t, []byte("XXXX"), b.Get([]byte("baz")))
assert(t, b != nil, "")
equals(t, []byte("XXXX"), b.Get([]byte("baz")))

b = b.Bucket([]byte("woojits/subbucket"))
assert.Equal(t, []byte("A"), b.Get([]byte("bat")))
}
b = b.Bucket([]byte("woojits/subbucket"))
equals(t, []byte("A"), b.Get([]byte("bat")))

return nil
})
@@ -51,5 +48,5 @@ func TestImport(t *testing.T) {
func TestImport_NotFound(t *testing.T) {
SetTestMode(true)
output := run("import", "path/to/db", "--input", "no/such/file")
assert.Equal(t, "open no/such/file: no such file or directory", output)
equals(t, "open no/such/file: no such file or directory", output)
}
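A shape change worth noting in TestImport above: testify's assert.NotNil only reports a failure and returns, so the follow-up Get calls had to be wrapped in an if guard, whereas the new assert helper calls FailNow and stops the test, so the guard is dropped and the later dereferences can be written unguarded. Schematically, with both fragments taken from the hunk above:

	// before: guarded, because the test would otherwise continue with b == nil
	if assert.NotNil(t, b) {
		assert.Equal(t, []byte("0000"), b.Get([]byte("foo")))
	}

	// after: assert stops the test on failure, so no guard is needed
	assert(t, b != nil, "")
	equals(t, []byte("0000"), b.Get([]byte("foo")))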
@@ -5,7 +5,6 @@ import (

"github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
)

// Ensure that a database info can be printed.
@@ -20,7 +19,7 @@ func TestInfo(t *testing.T) {
})
db.Close()
output := run("info", path)
assert.Equal(t, `Page Size: 4096`, output)
equals(t, `Page Size: 4096`, output)
})
}

@@ -28,5 +27,5 @@ func TestInfo(t *testing.T) {
func TestInfo_NotFound(t *testing.T) {
SetTestMode(true)
output := run("info", "no/such/db")
assert.Equal(t, "stat no/such/db: no such file or directory", output)
equals(t, "stat no/such/db: no such file or directory", output)
}
@@ -5,7 +5,6 @@ import (

"github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
)

// Ensure that a list of keys can be retrieved for a given bucket.
@@ -21,7 +20,7 @@ func TestKeys(t *testing.T) {
})
db.Close()
output := run("keys", path, "widgets")
assert.Equal(t, "0001\n0002\n0003", output)
equals(t, "0001\n0002\n0003", output)
})
}

@@ -29,7 +28,7 @@ func TestKeys(t *testing.T) {
func TestKeysDBNotFound(t *testing.T) {
SetTestMode(true)
output := run("keys", "no/such/db", "widgets")
assert.Equal(t, "stat no/such/db: no such file or directory", output)
equals(t, "stat no/such/db: no such file or directory", output)
}

// Ensure that an error is reported if the bucket is not found.
@@ -38,6 +37,6 @@ func TestKeysBucketNotFound(t *testing.T) {
open(func(db *bolt.DB, path string) {
db.Close()
output := run("keys", path, "widgets")
assert.Equal(t, "bucket not found: widgets", output)
equals(t, "bucket not found: widgets", output)
})
}
@@ -1,9 +1,14 @@
package main_test

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"testing"

	"github.com/boltdb/bolt"
	. "github.com/boltdb/bolt/cmd/bolt"
@@ -35,3 +40,30 @@ func tempfile() string {
	os.Remove(f.Name())
	return f.Name()
}

// assert fails the test if the condition is false.
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
	if !condition {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
		tb.FailNow()
	}
}

// ok fails the test if an err is not nil.
func ok(tb testing.TB, err error) {
	if err != nil {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
		tb.FailNow()
	}
}

// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
	if !reflect.DeepEqual(exp, act) {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
		tb.FailNow()
	}
}
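The hunk above shows only the tail of the tempfile helper plus the newly duplicated assert/ok/equals (Go test helpers cannot be shared across packages' _test files, hence the copy). For context, the full tempfile helper presumably looks something like this; its opening lines are outside the hunk, so the body is a sketch using only the io/ioutil and os imports already present:

	// tempfile returns the path of a temporary file that does not yet exist.
	// (Sketch: only the last three lines appear in the hunk above.)
	func tempfile() string {
		f, err := ioutil.TempFile("", "bolt-")
		if err != nil {
			panic(err)
		}
		f.Close()
		os.Remove(f.Name())
		return f.Name()
	}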
@@ -7,7 +7,6 @@ import (

"github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
)

func TestStats(t *testing.T) {
@@ -40,7 +39,7 @@ func TestStats(t *testing.T) {
})
db.Close()
output := run("stats", path, "b")
assert.Equal(t, "Aggregate statistics for 2 buckets\n\n"+
equals(t, "Aggregate statistics for 2 buckets\n\n"+
"Page count statistics\n"+
"\tNumber of logical branch pages: 0\n"+
"\tNumber of physical branch overflow pages: 0\n"+
cursor_test.go (632 lines changed)
@@ -1,4 +1,4 @@
package bolt
package bolt_test

import (
"bytes"
@@ -7,103 +7,104 @@
"testing"
"testing/quick"

"github.com/stretchr/testify/assert"
"github.com/boltdb/bolt"
)

// Ensure that a cursor can return a reference to the bucket that created it.
func TestCursor_Bucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucket([]byte("widgets"))
c := b.Cursor()
assert.Equal(t, b, c.Bucket())
return nil
})
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucket([]byte("widgets"))
c := b.Cursor()
equals(t, b, c.Bucket())
return nil
})
}

// Ensure that a Tx cursor can seek to the appropriate keys.
func TestCursor_Seek(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err)
assert.NoError(t, b.Put([]byte("foo"), []byte("0001")))
assert.NoError(t, b.Put([]byte("bar"), []byte("0002")))
assert.NoError(t, b.Put([]byte("baz"), []byte("0003")))
_, err = b.CreateBucket([]byte("bkt"))
assert.NoError(t, err)
return nil
})
db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
ok(t, err)
ok(t, b.Put([]byte("foo"), []byte("0001")))
ok(t, b.Put([]byte("bar"), []byte("0002")))
ok(t, b.Put([]byte("baz"), []byte("0003")))
_, err = b.CreateBucket([]byte("bkt"))
ok(t, err)
return nil
})
db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()

// Exact match should go to the key.
k, v := c.Seek([]byte("bar"))
assert.Equal(t, []byte("bar"), k)
assert.Equal(t, []byte("0002"), v)
// Exact match should go to the key.
k, v := c.Seek([]byte("bar"))
equals(t, []byte("bar"), k)
equals(t, []byte("0002"), v)

// Inexact match should go to the next key.
k, v = c.Seek([]byte("bas"))
assert.Equal(t, []byte("baz"), k)
assert.Equal(t, []byte("0003"), v)
// Inexact match should go to the next key.
k, v = c.Seek([]byte("bas"))
equals(t, []byte("baz"), k)
equals(t, []byte("0003"), v)

// Low key should go to the first key.
k, v = c.Seek([]byte(""))
assert.Equal(t, []byte("bar"), k)
assert.Equal(t, []byte("0002"), v)
// Low key should go to the first key.
k, v = c.Seek([]byte(""))
equals(t, []byte("bar"), k)
equals(t, []byte("0002"), v)

// High key should return no key.
k, v = c.Seek([]byte("zzz"))
assert.Nil(t, k)
assert.Nil(t, v)
// High key should return no key.
k, v = c.Seek([]byte("zzz"))
assert(t, k == nil, "")
assert(t, v == nil, "")

// Buckets should return their key but no value.
k, v = c.Seek([]byte("bkt"))
assert.Equal(t, []byte("bkt"), k)
assert.Nil(t, v)
// Buckets should return their key but no value.
k, v = c.Seek([]byte("bkt"))
equals(t, []byte("bkt"), k)
assert(t, v == nil, "")

return nil
})
return nil
})
}

func TestCursor_Delete(t *testing.T) {
withOpenDB(func(db *DB, path string) {
var count = 1000
db := NewTestDB()
defer db.Close()

// Insert every other key between 0 and $count.
db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucket([]byte("widgets"))
for i := 0; i < count; i += 1 {
k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(i))
b.Put(k, make([]byte, 100))
var count = 1000

// Insert every other key between 0 and $count.
db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucket([]byte("widgets"))
for i := 0; i < count; i += 1 {
k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(i))
b.Put(k, make([]byte, 100))
}
b.CreateBucket([]byte("sub"))
return nil
})

db.Update(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
bound := make([]byte, 8)
binary.BigEndian.PutUint64(bound, uint64(count/2))
for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
if err := c.Delete(); err != nil {
return err
}
b.CreateBucket([]byte("sub"))
return nil
})
}
c.Seek([]byte("sub"))
err := c.Delete()
equals(t, err, bolt.ErrIncompatibleValue)
return nil
})

db.Update(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
bound := make([]byte, 8)
binary.BigEndian.PutUint64(bound, uint64(count/2))
for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
if err := c.Delete(); err != nil {
return err
}
}
c.Seek([]byte("sub"))
err := c.Delete()
assert.Equal(t, err, ErrIncompatibleValue)
return nil
})

db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
assert.Equal(t, b.Stats().KeyN, count/2+1)
return nil
})
db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets"))
equals(t, b.Stats().KeyN, count/2+1)
return nil
})
}

@@ -113,216 +114,223 @@ func TestCursor_Delete(t *testing.T) {
//
// Related: https://github.com/boltdb/bolt/pull/187
func TestCursor_Seek_Large(t *testing.T) {
withOpenDB(func(db *DB, path string) {
var count = 10000
db := NewTestDB()
defer db.Close()

// Insert every other key between 0 and $count.
db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucket([]byte("widgets"))
for i := 0; i < count; i += 100 {
for j := i; j < i+100; j += 2 {
k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(j))
b.Put(k, make([]byte, 100))
}
var count = 10000

// Insert every other key between 0 and $count.
db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucket([]byte("widgets"))
for i := 0; i < count; i += 100 {
for j := i; j < i+100; j += 2 {
k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(j))
b.Put(k, make([]byte, 100))
}
return nil
})
}
return nil
})

db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
for i := 0; i < count; i++ {
seek := make([]byte, 8)
binary.BigEndian.PutUint64(seek, uint64(i))
db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
for i := 0; i < count; i++ {
seek := make([]byte, 8)
binary.BigEndian.PutUint64(seek, uint64(i))

k, _ := c.Seek(seek)
k, _ := c.Seek(seek)

// The last seek is beyond the end of the the range so
// it should return nil.
if i == count-1 {
assert.Nil(t, k)
continue
}

// Otherwise we should seek to the exact key or the next key.
num := binary.BigEndian.Uint64(k)
if i%2 == 0 {
assert.Equal(t, uint64(i), num)
} else {
assert.Equal(t, uint64(i+1), num)
}
// The last seek is beyond the end of the the range so
// it should return nil.
if i == count-1 {
assert(t, k == nil, "")
continue
}

return nil
})
// Otherwise we should seek to the exact key or the next key.
num := binary.BigEndian.Uint64(k)
if i%2 == 0 {
equals(t, uint64(i), num)
} else {
equals(t, uint64(i+1), num)
}
}

return nil
})
}

// Ensure that a cursor can iterate over an empty bucket without error.
func TestCursor_EmptyBucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.First()
assert.Nil(t, k)
assert.Nil(t, v)
return nil
})
db := NewTestDB()
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.First()
assert(t, k == nil, "")
assert(t, v == nil, "")
return nil
})
}

// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
func TestCursor_EmptyBucketReverse(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.Last()
assert.Nil(t, k)
assert.Nil(t, v)
return nil
})
db := NewTestDB()
defer db.Close()

db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.Last()
assert(t, k == nil, "")
assert(t, v == nil, "")
return nil
})
}

// Ensure that a Tx cursor can iterate over a single root with a couple elements.
func TestCursor_Iterate_Leaf(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
return nil
})
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
db := NewTestDB()
defer db.Close()

k, v := c.First()
assert.Equal(t, string(k), "bar")
assert.Equal(t, v, []byte{1})

k, v = c.Next()
assert.Equal(t, string(k), "baz")
assert.Equal(t, v, []byte{})

k, v = c.Next()
assert.Equal(t, string(k), "foo")
assert.Equal(t, v, []byte{0})

k, v = c.Next()
assert.Nil(t, k)
assert.Nil(t, v)

k, v = c.Next()
assert.Nil(t, k)
assert.Nil(t, v)

tx.Rollback()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
return nil
})
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()

k, v := c.First()
equals(t, string(k), "bar")
equals(t, v, []byte{1})

k, v = c.Next()
equals(t, string(k), "baz")
equals(t, v, []byte{})

k, v = c.Next()
equals(t, string(k), "foo")
equals(t, v, []byte{0})

k, v = c.Next()
assert(t, k == nil, "")
assert(t, v == nil, "")

k, v = c.Next()
assert(t, k == nil, "")
assert(t, v == nil, "")

tx.Rollback()
}

// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
func TestCursor_LeafRootReverse(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
return nil
})
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
db := NewTestDB()
defer db.Close()

k, v := c.Last()
assert.Equal(t, string(k), "foo")
assert.Equal(t, v, []byte{0})

k, v = c.Prev()
assert.Equal(t, string(k), "baz")
assert.Equal(t, v, []byte{})

k, v = c.Prev()
assert.Equal(t, string(k), "bar")
assert.Equal(t, v, []byte{1})

k, v = c.Prev()
assert.Nil(t, k)
assert.Nil(t, v)

k, v = c.Prev()
assert.Nil(t, k)
assert.Nil(t, v)

tx.Rollback()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
return nil
})
tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()

k, v := c.Last()
equals(t, string(k), "foo")
equals(t, v, []byte{0})

k, v = c.Prev()
equals(t, string(k), "baz")
equals(t, v, []byte{})

k, v = c.Prev()
equals(t, string(k), "bar")
equals(t, v, []byte{1})

k, v = c.Prev()
assert(t, k == nil, "")
assert(t, v == nil, "")

k, v = c.Prev()
assert(t, k == nil, "")
assert(t, v == nil, "")

tx.Rollback()
}

// Ensure that a Tx cursor can restart from the beginning.
func TestCursor_Restart(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
return nil
})
db := NewTestDB()
defer db.Close()

tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()

k, _ := c.First()
assert.Equal(t, string(k), "bar")

k, _ = c.Next()
assert.Equal(t, string(k), "foo")

k, _ = c.First()
assert.Equal(t, string(k), "bar")

k, _ = c.Next()
assert.Equal(t, string(k), "foo")

tx.Rollback()
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
return nil
})

tx, _ := db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()

k, _ := c.First()
equals(t, string(k), "bar")

k, _ = c.Next()
equals(t, string(k), "foo")

k, _ = c.First()
equals(t, string(k), "bar")

k, _ = c.Next()
equals(t, string(k), "foo")

tx.Rollback()
}

// Ensure that a Tx can iterate over all elements in a bucket.
func TestCursor_QuickCheck(t *testing.T) {
f := func(items testdata) bool {
withOpenDB(func(db *DB, path string) {
// Bulk insert all values.
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
assert.NoError(t, b.Put(item.Key, item.Value))
}
assert.NoError(t, tx.Commit())
db := NewTestDB()
defer db.Close()

// Sort test data.
sort.Sort(items)
// Bulk insert all values.
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
ok(t, b.Put(item.Key, item.Value))
}
ok(t, tx.Commit())

// Sort test data.
sort.Sort(items)

// Iterate over all items and check consistency.
var index = 0
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
equals(t, k, items[index].Key)
equals(t, v, items[index].Value)
index++
}
equals(t, len(items), index)
tx.Rollback()

// Iterate over all items and check consistency.
var index = 0
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
assert.Equal(t, k, items[index].Key)
assert.Equal(t, v, items[index].Value)
index++
}
assert.Equal(t, len(items), index)
tx.Rollback()
})
return true
}
if err := quick.Check(f, qconfig()); err != nil {
@@ -333,31 +341,33 @@ func TestCursor_QuickCheck(t *testing.T) {
// Ensure that a transaction can iterate over all elements in a bucket in reverse.
func TestCursor_QuickCheck_Reverse(t *testing.T) {
f := func(items testdata) bool {
withOpenDB(func(db *DB, path string) {
// Bulk insert all values.
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
assert.NoError(t, b.Put(item.Key, item.Value))
}
assert.NoError(t, tx.Commit())
db := NewTestDB()
defer db.Close()

// Sort test data.
sort.Sort(revtestdata(items))
// Bulk insert all values.
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
ok(t, b.Put(item.Key, item.Value))
}
ok(t, tx.Commit())

// Sort test data.
sort.Sort(revtestdata(items))

// Iterate over all items and check consistency.
var index = 0
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
equals(t, k, items[index].Key)
equals(t, v, items[index].Value)
index++
}
equals(t, len(items), index)
tx.Rollback()

// Iterate over all items and check consistency.
var index = 0
tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
assert.Equal(t, k, items[index].Key)
assert.Equal(t, v, items[index].Value)
index++
}
assert.Equal(t, len(items), index)
tx.Rollback()
})
return true
}
if err := quick.Check(f, qconfig()); err != nil {
@@ -367,54 +377,56 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {

// Ensure that a Tx cursor can iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err)
_, err = b.CreateBucket([]byte("foo"))
assert.NoError(t, err)
_, err = b.CreateBucket([]byte("bar"))
assert.NoError(t, err)
_, err = b.CreateBucket([]byte("baz"))
assert.NoError(t, err)
return nil
})
db.View(func(tx *Tx) error {
var names []string
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
names = append(names, string(k))
assert.Nil(t, v)
}
assert.Equal(t, names, []string{"bar", "baz", "foo"})
return nil
})
db := NewTestDB()
defer db.Close()

db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
ok(t, err)
_, err = b.CreateBucket([]byte("foo"))
ok(t, err)
_, err = b.CreateBucket([]byte("bar"))
ok(t, err)
_, err = b.CreateBucket([]byte("baz"))
ok(t, err)
return nil
})
db.View(func(tx *bolt.Tx) error {
var names []string
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
names = append(names, string(k))
assert(t, v == nil, "")
}
equals(t, names, []string{"bar", "baz", "foo"})
return nil
})
}

// Ensure that a Tx cursor can reverse iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err)
_, err = b.CreateBucket([]byte("foo"))
assert.NoError(t, err)
_, err = b.CreateBucket([]byte("bar"))
assert.NoError(t, err)
_, err = b.CreateBucket([]byte("baz"))
assert.NoError(t, err)
return nil
})
db.View(func(tx *Tx) error {
var names []string
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil; k, v = c.Prev() {
names = append(names, string(k))
assert.Nil(t, v)
}
assert.Equal(t, names, []string{"foo", "baz", "bar"})
return nil
})
db := NewTestDB()
defer db.Close()

db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
ok(t, err)
_, err = b.CreateBucket([]byte("foo"))
ok(t, err)
_, err = b.CreateBucket([]byte("bar"))
ok(t, err)
_, err = b.CreateBucket([]byte("baz"))
ok(t, err)
return nil
})
db.View(func(tx *bolt.Tx) error {
var names []string
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil; k, v = c.Prev() {
names = append(names, string(k))
assert(t, v == nil, "")
}
equals(t, names, []string{"foo", "baz", "bar"})
return nil
})
}
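The rewritten cursor tests above depend on a NewTestDB helper that is referenced but not defined in this diff; the tx.DB() == db.DB comparison elsewhere in the commit suggests it embeds *bolt.DB. A minimal sketch of what such a wrapper could look like, under those assumptions:

	package bolt_test

	import (
		"io/ioutil"
		"os"

		"github.com/boltdb/bolt"
	)

	// TestDB wraps *bolt.DB so tests get a database on a fresh temporary file
	// that is removed again on Close. (Sketch; not part of this diff.)
	type TestDB struct {
		*bolt.DB
	}

	// NewTestDB opens a bolt database on a new temp file.
	func NewTestDB() *TestDB {
		f, err := ioutil.TempFile("", "bolt-")
		if err != nil {
			panic(err)
		}
		f.Close()
		db, err := bolt.Open(f.Name(), 0600, nil)
		if err != nil {
			panic(err)
		}
		return &TestDB{db}
	}

	// Close closes the database and removes the underlying file.
	func (db *TestDB) Close() {
		path := db.Path()
		db.DB.Close()
		os.Remove(path)
	}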
db.go (4 lines changed)
@@ -384,8 +384,8 @@ func (db *DB) beginRWTx() (*Tx, error) {
// Free any pages associated with closed read-only transactions.
var minid txid = 0xFFFFFFFFFFFFFFFF
for _, t := range db.txs {
if t.id() < minid {
minid = t.id()
if t.meta.txid < minid {
minid = t.meta.txid
}
}
if minid > 0 {
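This hunk and the bucket.go hunk earlier make the same substitution: calls to an id() accessor on Tx are replaced by a direct read of tx.meta.txid, presumably because the unexported accessor is removed elsewhere in the commit. A hypothetical reconstruction of what that accessor would have been (it is not shown in this diff):

	// id returns the transaction id stored in the transaction's meta page.
	// (Hypothetical one-line accessor being inlined by this change.)
	func (tx *Tx) id() txid {
		return tx.meta.txid
	}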
db_test.go (737 lines changed)
@ -1,4 +1,4 @@
|
||||
package bolt
|
||||
package bolt_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@ -12,29 +12,28 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
var statsFlag = flag.Bool("stats", false, "show performance stats")
|
||||
|
||||
// Ensure that opening a database with a bad path returns an error.
|
||||
func TestOpen_BadPath(t *testing.T) {
|
||||
db, err := Open("", 0666, nil)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, db)
|
||||
db, err := bolt.Open("", 0666, nil)
|
||||
assert(t, err != nil, "err: %s", err)
|
||||
assert(t, db == nil, "")
|
||||
}
|
||||
|
||||
// Ensure that a database can be opened without error.
|
||||
func TestOpen(t *testing.T) {
|
||||
withTempPath(func(path string) {
|
||||
db, err := Open(path, 0666, nil)
|
||||
assert.NotNil(t, db)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, db.Path(), path)
|
||||
assert.NoError(t, db.Close())
|
||||
})
|
||||
path := tempfile()
|
||||
defer os.Remove(path)
|
||||
db, err := bolt.Open(path, 0666, nil)
|
||||
assert(t, db != nil, "")
|
||||
ok(t, err)
|
||||
equals(t, db.Path(), path)
|
||||
ok(t, db.Close())
|
||||
}
|
||||
|
||||
// Ensure that opening an already open database file will timeout.
|
||||
@ -42,21 +41,23 @@ func TestOpen_Timeout(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("timeout not supported on windows")
|
||||
}
|
||||
withTempPath(func(path string) {
|
||||
// Open a data file.
|
||||
db0, err := Open(path, 0666, nil)
|
||||
assert.NotNil(t, db0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Attempt to open the database again.
|
||||
start := time.Now()
|
||||
db1, err := Open(path, 0666, &Options{Timeout: 100 * time.Millisecond})
|
||||
assert.Nil(t, db1)
|
||||
assert.Equal(t, ErrTimeout, err)
|
||||
assert.True(t, time.Since(start) > 100*time.Millisecond)
|
||||
path := tempfile()
|
||||
defer os.Remove(path)
|
||||
|
||||
db0.Close()
|
||||
})
|
||||
// Open a data file.
|
||||
db0, err := bolt.Open(path, 0666, nil)
|
||||
assert(t, db0 != nil, "")
|
||||
ok(t, err)
|
||||
|
||||
// Attempt to open the database again.
|
||||
start := time.Now()
|
||||
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond})
|
||||
assert(t, db1 == nil, "")
|
||||
equals(t, bolt.ErrTimeout, err)
|
||||
assert(t, time.Since(start) > 100*time.Millisecond, "")
|
||||
|
||||
db0.Close()
|
||||
}
|
||||
|
||||
// Ensure that opening an already open database file will wait until its closed.
|
||||
@ -64,48 +65,51 @@ func TestOpen_Wait(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("timeout not supported on windows")
|
||||
}
|
||||
withTempPath(func(path string) {
|
||||
// Open a data file.
|
||||
db0, err := Open(path, 0666, nil)
|
||||
assert.NotNil(t, db0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Close it in just a bit.
|
||||
time.AfterFunc(100*time.Millisecond, func() { db0.Close() })
|
||||
path := tempfile()
|
||||
defer os.Remove(path)
|
||||
|
||||
// Attempt to open the database again.
|
||||
start := time.Now()
|
||||
db1, err := Open(path, 0666, &Options{Timeout: 200 * time.Millisecond})
|
||||
assert.NotNil(t, db1)
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, time.Since(start) > 100*time.Millisecond)
|
||||
})
|
||||
// Open a data file.
|
||||
db0, err := bolt.Open(path, 0666, nil)
|
||||
assert(t, db0 != nil, "")
|
||||
ok(t, err)
|
||||
|
||||
// Close it in just a bit.
|
||||
time.AfterFunc(100*time.Millisecond, func() { db0.Close() })
|
||||
|
||||
// Attempt to open the database again.
|
||||
start := time.Now()
|
||||
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond})
|
||||
assert(t, db1 != nil, "")
|
||||
ok(t, err)
|
||||
assert(t, time.Since(start) > 100*time.Millisecond, "")
|
||||
}
|
||||
|
||||
// Ensure that a re-opened database is consistent.
|
||||
func TestOpen_Check(t *testing.T) {
|
||||
withTempPath(func(path string) {
|
||||
db, err := Open(path, 0666, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.View(func(tx *Tx) error { return <-tx.Check() }))
|
||||
db.Close()
|
||||
path := tempfile()
|
||||
defer os.Remove(path)
|
||||
|
||||
db, err = Open(path, 0666, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.View(func(tx *Tx) error { return <-tx.Check() }))
|
||||
db.Close()
|
||||
})
|
||||
db, err := bolt.Open(path, 0666, nil)
|
||||
ok(t, err)
|
||||
ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() }))
|
||||
db.Close()
|
||||
|
||||
db, err = bolt.Open(path, 0666, nil)
|
||||
ok(t, err)
|
||||
ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() }))
|
||||
db.Close()
|
||||
}
|
||||
|
||||
// Ensure that the database returns an error if the file handle cannot be open.
|
||||
func TestDB_Open_FileError(t *testing.T) {
|
||||
withTempPath(func(path string) {
|
||||
_, err := Open(path+"/youre-not-my-real-parent", 0666, nil)
|
||||
if err, _ := err.(*os.PathError); assert.Error(t, err) {
|
||||
assert.Equal(t, path+"/youre-not-my-real-parent", err.Path)
|
||||
assert.Equal(t, "open", err.Op)
|
||||
}
|
||||
})
|
||||
path := tempfile()
|
||||
defer os.Remove(path)
|
||||
|
||||
_, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil)
|
||||
assert(t, err.(*os.PathError) != nil, "")
|
||||
equals(t, path+"/youre-not-my-real-parent", err.(*os.PathError).Path)
|
||||
equals(t, "open", err.(*os.PathError).Op)
|
||||
}
|
||||
|
||||
// Ensure that write errors to the meta file handler during initialization are returned.
|
||||
@ -115,218 +119,227 @@ func TestDB_Open_MetaInitWriteError(t *testing.T) {
|
||||
|
||||
// Ensure that a database that is too small returns an error.
|
||||
func TestDB_Open_FileTooSmall(t *testing.T) {
|
||||
withTempPath(func(path string) {
|
||||
db, err := Open(path, 0666, nil)
|
||||
assert.NoError(t, err)
|
||||
db.Close()
|
||||
path := tempfile()
|
||||
defer os.Remove(path)
|
||||
|
||||
// corrupt the database
|
||||
assert.NoError(t, os.Truncate(path, int64(os.Getpagesize())))
|
||||
db, err := bolt.Open(path, 0666, nil)
|
||||
ok(t, err)
|
||||
db.Close()
|
||||
|
||||
db, err = Open(path, 0666, nil)
|
||||
assert.Equal(t, errors.New("file size too small"), err)
|
||||
})
|
||||
// corrupt the database
|
||||
ok(t, os.Truncate(path, int64(os.Getpagesize())))
|
||||
|
||||
db, err = bolt.Open(path, 0666, nil)
|
||||
equals(t, errors.New("file size too small"), err)
|
||||
}
|
||||
|
||||
// Ensure that corrupt meta0 page errors get returned.
|
||||
func TestDB_Open_CorruptMeta0(t *testing.T) {
|
||||
withTempPath(func(path string) {
|
||||
var m meta
|
||||
m.magic = magic
|
||||
m.version = version
|
||||
m.pageSize = 0x8000
|
||||
|
||||
// Create a file with bad magic.
|
||||
b := make([]byte, 0x10000)
|
||||
p0, p1 := (*page)(unsafe.Pointer(&b[0x0000])), (*page)(unsafe.Pointer(&b[0x8000]))
|
||||
p0.meta().magic = 0
|
||||
p0.meta().version = version
|
||||
p1.meta().magic = magic
|
||||
p1.meta().version = version
|
||||
err := ioutil.WriteFile(path, b, 0666)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Open the database.
|
||||
_, err = Open(path, 0666, nil)
|
||||
assert.Equal(t, err, errors.New("meta0 error: invalid database"))
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that a corrupt meta page checksum causes the open to fail.
|
||||
func TestDB_Open_MetaChecksumError(t *testing.T) {
|
||||
for i := 0; i < 2; i++ {
|
||||
withTempPath(func(path string) {
|
||||
db, err := Open(path, 0600, nil)
|
||||
pageSize := db.pageSize
|
||||
db.Update(func(tx *Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
})
|
||||
db.Update(func(tx *Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("woojits"))
|
||||
return err
|
||||
})
|
||||
db.Close()
|
||||
|
||||
// Change a single byte in the meta page.
|
||||
f, _ := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
|
||||
f.WriteAt([]byte{1}, int64((i*pageSize)+(pageHeaderSize+12)))
|
||||
f.Sync()
|
||||
f.Close()
|
||||
|
||||
// Reopen the database.
|
||||
_, err = Open(path, 0600, nil)
|
||||
if assert.Error(t, err) {
|
||||
if i == 0 {
|
||||
assert.Equal(t, "meta0 error: checksum error", err.Error())
|
||||
} else {
|
||||
assert.Equal(t, "meta1 error: checksum error", err.Error())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
// TODO(benbjohnson): Test corruption at every byte of the first two pages.
|
||||
|
||||
// Ensure that a database cannot open a transaction when it's not open.
|
||||
func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
|
||||
var db DB
|
||||
var db bolt.DB
|
||||
tx, err := db.Begin(false)
|
||||
assert.Nil(t, tx)
|
||||
assert.Equal(t, err, ErrDatabaseNotOpen)
|
||||
assert(t, tx == nil, "")
|
||||
equals(t, err, bolt.ErrDatabaseNotOpen)
|
||||
}
|
||||
|
||||
// Ensure that a read-write transaction can be retrieved.
|
||||
func TestDB_BeginRW(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
tx, err := db.Begin(true)
|
||||
assert.NotNil(t, tx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tx.DB(), db)
|
||||
assert.Equal(t, tx.Writable(), true)
|
||||
assert.NoError(t, tx.Commit())
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
tx, err := db.Begin(true)
|
||||
assert(t, tx != nil, "")
|
||||
ok(t, err)
|
||||
assert(t, tx.DB() == db.DB, "")
|
||||
equals(t, tx.Writable(), true)
|
||||
ok(t, tx.Commit())
|
||||
}
|
||||
|
||||
// Ensure that opening a transaction while the DB is closed returns an error.
|
||||
func TestDB_BeginRW_Closed(t *testing.T) {
|
||||
var db DB
|
||||
var db bolt.DB
|
||||
tx, err := db.Begin(true)
|
||||
assert.Equal(t, err, ErrDatabaseNotOpen)
|
||||
assert.Nil(t, tx)
|
||||
equals(t, err, bolt.ErrDatabaseNotOpen)
|
||||
assert(t, tx == nil, "")
|
||||
}
|
||||
|
||||
// Ensure a database can provide a transactional block.
|
||||
func TestDB_Update(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
err := db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
b := tx.Bucket([]byte("widgets"))
|
||||
b.Put([]byte("foo"), []byte("bar"))
|
||||
b.Put([]byte("baz"), []byte("bat"))
|
||||
b.Delete([]byte("foo"))
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
err = db.View(func(tx *Tx) error {
|
||||
assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
|
||||
assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
err := db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
b := tx.Bucket([]byte("widgets"))
|
||||
b.Put([]byte("foo"), []byte("bar"))
|
||||
b.Put([]byte("baz"), []byte("bat"))
|
||||
b.Delete([]byte("foo"))
|
||||
return nil
|
||||
})
|
||||
ok(t, err)
|
||||
err = db.View(func(tx *bolt.Tx) error {
|
||||
assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
|
||||
equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
|
||||
return nil
|
||||
})
|
||||
ok(t, err)
|
||||
}
|
||||
|
||||
// Ensure a closed database returns an error while running a transaction block
|
||||
func TestDB_Update_Closed(t *testing.T) {
|
||||
var db DB
|
||||
err := db.Update(func(tx *Tx) error {
|
||||
var db bolt.DB
|
||||
err := db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
return nil
|
||||
})
|
||||
assert.Equal(t, err, ErrDatabaseNotOpen)
|
||||
equals(t, err, bolt.ErrDatabaseNotOpen)
|
||||
}
|
||||
|
||||
// Ensure a panic occurs while trying to commit a managed transaction.
|
||||
func TestDB_Update_ManualCommitAndRollback(t *testing.T) {
|
||||
var db DB
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
assert.Panics(t, func() { tx.Commit() })
|
||||
assert.Panics(t, func() { tx.Rollback() })
|
||||
func TestDB_Update_ManualCommit(t *testing.T) {
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
|
||||
var ok bool
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ok = true
|
||||
}
|
||||
}()
|
||||
tx.Commit()
|
||||
}()
|
||||
return nil
|
||||
})
|
||||
db.View(func(tx *Tx) error {
|
||||
assert.Panics(t, func() { tx.Commit() })
|
||||
assert.Panics(t, func() { tx.Rollback() })
|
||||
assert(t, ok, "expected panic")
|
||||
}
|
||||
|
||||
// Ensure a panic occurs while trying to rollback a managed transaction.
|
||||
func TestDB_Update_ManualRollback(t *testing.T) {
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
|
||||
var ok bool
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ok = true
|
||||
}
|
||||
}()
|
||||
tx.Rollback()
|
||||
}()
|
||||
return nil
|
||||
})
|
||||
assert(t, ok, "expected panic")
|
||||
}
|
||||
|
||||
// Ensure a panic occurs while trying to commit a managed transaction.
|
||||
func TestDB_View_ManualCommit(t *testing.T) {
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
|
||||
var ok bool
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ok = true
|
||||
}
|
||||
}()
|
||||
tx.Commit()
|
||||
}()
|
||||
return nil
|
||||
})
|
||||
assert(t, ok, "expected panic")
|
||||
}
|
||||
|
||||
// Ensure a panic occurs while trying to rollback a managed transaction.
|
||||
func TestDB_View_ManualRollback(t *testing.T) {
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
|
||||
var ok bool
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ok = true
|
||||
}
|
||||
}()
|
||||
tx.Rollback()
|
||||
}()
|
||||
return nil
|
||||
})
|
||||
assert(t, ok, "expected panic")
|
||||
}
|
||||
|
||||
// Ensure a write transaction that panics does not hold open locks.
|
||||
func TestDB_Update_Panic(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
warn("recover: update", r)
|
||||
}
|
||||
}()
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
panic("omg")
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
t.Log("recover: update", r)
|
||||
}
|
||||
}()
|
||||
|
||||
// Verify we can update again.
|
||||
err := db.Update(func(tx *Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
panic("omg")
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
}()
|
||||
|
||||
// Verify that our change persisted.
|
||||
err = db.Update(func(tx *Tx) error {
|
||||
assert.NotNil(t, tx.Bucket([]byte("widgets")))
|
||||
return nil
|
||||
})
|
||||
// Verify we can update again.
|
||||
err := db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
})
|
||||
ok(t, err)
|
||||
|
||||
// Verify that our change persisted.
|
||||
err = db.Update(func(tx *bolt.Tx) error {
|
||||
assert(t, tx.Bucket([]byte("widgets")) != nil, "")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure a database can return an error through a read-only transactional block.
|
||||
func TestDB_View_Error(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
err := db.View(func(tx *Tx) error {
|
||||
return errors.New("xxx")
|
||||
})
|
||||
assert.Equal(t, errors.New("xxx"), err)
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
err := db.View(func(tx *bolt.Tx) error {
|
||||
return errors.New("xxx")
|
||||
})
|
||||
equals(t, errors.New("xxx"), err)
|
||||
}
|
||||
|
||||
// Ensure a read transaction that panics does not hold open locks.
|
||||
func TestDB_View_Panic(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
return nil
|
||||
})
|
||||
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
warn("recover: view", r)
|
||||
}
|
||||
}()
|
||||
db.View(func(tx *Tx) error {
|
||||
assert.NotNil(t, tx.Bucket([]byte("widgets")))
|
||||
panic("omg")
|
||||
})
|
||||
func() {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
t.Log("recover: view", r)
|
||||
}
|
||||
}()
|
||||
|
||||
// Verify that we can still use read transactions.
|
||||
db.View(func(tx *Tx) error {
|
||||
assert.NotNil(t, tx.Bucket([]byte("widgets")))
|
||||
return nil
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
assert(t, tx.Bucket([]byte("widgets")) != nil, "")
|
||||
panic("omg")
|
||||
})
|
||||
}()
|
||||
|
||||
// Verify that we can still use read transactions.
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
assert(t, tx.Bucket([]byte("widgets")) != nil, "")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
@ -337,157 +350,85 @@ func TestDB_Commit_WriteFail(t *testing.T) {
|
||||
|
||||
// Ensure that DB stats can be returned.
|
||||
func TestDB_Stats(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
})
|
||||
stats := db.Stats()
|
||||
assert.Equal(t, 2, stats.TxStats.PageCount, "PageCount")
|
||||
assert.Equal(t, 0, stats.FreePageN, "FreePageN")
|
||||
assert.Equal(t, 2, stats.PendingPageN, "PendingPageN")
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that the mmap grows appropriately.
|
||||
func TestDB_mmapSize(t *testing.T) {
|
||||
db := &DB{pageSize: 4096}
|
||||
assert.Equal(t, db.mmapSize(0), minMmapSize)
|
||||
assert.Equal(t, db.mmapSize(16384), minMmapSize)
|
||||
assert.Equal(t, db.mmapSize(minMmapSize-1), minMmapSize)
|
||||
assert.Equal(t, db.mmapSize(minMmapSize), minMmapSize)
|
||||
assert.Equal(t, db.mmapSize(minMmapSize+1), (minMmapSize*2)+4096)
|
||||
assert.Equal(t, db.mmapSize(10000000), 20000768)
|
||||
assert.Equal(t, db.mmapSize((1<<30)-1), 2147483648)
|
||||
assert.Equal(t, db.mmapSize(1<<30), 1<<31)
|
||||
stats := db.Stats()
|
||||
equals(t, 2, stats.TxStats.PageCount)
|
||||
equals(t, 0, stats.FreePageN)
|
||||
equals(t, 2, stats.PendingPageN)
|
||||
}
|
||||
|
||||
// Ensure that database pages are in expected order and type.
|
||||
func TestDB_Consistency(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
})
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
db.Update(func(tx *Tx) error {
|
||||
assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
|
||||
return nil
|
||||
})
|
||||
}
|
||||
db.Update(func(tx *Tx) error {
|
||||
if p, _ := tx.Page(0); assert.NotNil(t, p) {
|
||||
assert.Equal(t, "meta", p.Type)
|
||||
}
|
||||
if p, _ := tx.Page(1); assert.NotNil(t, p) {
|
||||
assert.Equal(t, "meta", p.Type)
|
||||
}
|
||||
if p, _ := tx.Page(2); assert.NotNil(t, p) {
|
||||
assert.Equal(t, "free", p.Type)
|
||||
}
|
||||
if p, _ := tx.Page(3); assert.NotNil(t, p) {
|
||||
assert.Equal(t, "free", p.Type)
|
||||
}
|
||||
if p, _ := tx.Page(4); assert.NotNil(t, p) {
|
||||
assert.Equal(t, "leaf", p.Type) // root leaf
|
||||
}
|
||||
if p, _ := tx.Page(5); assert.NotNil(t, p) {
|
||||
assert.Equal(t, "freelist", p.Type)
|
||||
}
|
||||
p, _ := tx.Page(6)
|
||||
assert.Nil(t, p)
|
||||
for i := 0; i < 10; i++ {
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
|
||||
return nil
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
p, _ := tx.Page(0)
|
||||
assert(t, p != nil, "")
|
||||
equals(t, "meta", p.Type)
|
||||
|
||||
// Ensure that a database can return a string representation of itself.
|
||||
func TestDB_String(t *testing.T) {
|
||||
db := &DB{path: "/foo/bar"}
|
||||
assert.Equal(t, db.String(), `DB<"/foo/bar">`)
|
||||
assert.Equal(t, db.GoString(), `bolt.DB{path:"/foo/bar"}`)
|
||||
p, _ = tx.Page(1)
|
||||
assert(t, p != nil, "")
|
||||
equals(t, "meta", p.Type)
|
||||
|
||||
p, _ = tx.Page(2)
|
||||
assert(t, p != nil, "")
|
||||
equals(t, "free", p.Type)
|
||||
|
||||
p, _ = tx.Page(3)
|
||||
assert(t, p != nil, "")
|
||||
equals(t, "free", p.Type)
|
||||
|
||||
p, _ = tx.Page(4)
|
||||
assert(t, p != nil, "")
|
||||
equals(t, "leaf", p.Type)
|
||||
|
||||
p, _ = tx.Page(5)
|
||||
assert(t, p != nil, "")
|
||||
equals(t, "freelist", p.Type)
|
||||
|
||||
p, _ = tx.Page(6)
|
||||
assert(t, p == nil, "")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that DB stats can be subtracted from one another.
|
||||
func TestDBStats_Sub(t *testing.T) {
|
||||
var a, b Stats
|
||||
var a, b bolt.Stats
|
||||
a.TxStats.PageCount = 3
|
||||
a.FreePageN = 4
|
||||
b.TxStats.PageCount = 10
|
||||
b.FreePageN = 14
|
||||
diff := b.Sub(&a)
|
||||
assert.Equal(t, 7, diff.TxStats.PageCount)
|
||||
equals(t, 7, diff.TxStats.PageCount)
|
||||
// free page stats are copied from the receiver and not subtracted
|
||||
assert.Equal(t, 14, diff.FreePageN)
|
||||
}
|
||||
|
||||
// Ensure that meta with bad magic is invalid.
|
||||
func TestMeta_validate_magic(t *testing.T) {
|
||||
m := &meta{magic: 0x01234567}
|
||||
assert.Equal(t, m.validate(), ErrInvalid)
|
||||
}
|
||||
|
||||
// Ensure that meta with a bad version is invalid.
|
||||
func TestMeta_validate_version(t *testing.T) {
|
||||
m := &meta{magic: magic, version: 200}
|
||||
assert.Equal(t, m.validate(), ErrVersionMismatch)
|
||||
}
|
||||
|
||||
// Ensure that a DB in strict mode will fail when corrupted.
|
||||
func TestDB_StrictMode(t *testing.T) {
|
||||
var msg string
|
||||
func() {
|
||||
defer func() {
|
||||
msg = fmt.Sprintf("%s", recover())
|
||||
}()
|
||||
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.StrictMode = true
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("foo"))
|
||||
|
||||
// Corrupt the DB by extending the high water mark.
|
||||
tx.meta.pgid++
|
||||
|
||||
return nil
|
||||
})
|
||||
})
|
||||
}()
|
||||
|
||||
assert.Equal(t, "check fail: page 4: unreachable unfreed", msg)
|
||||
}
|
||||
|
||||
// Ensure that double freeing a page will result in a panic.
|
||||
func TestDB_DoubleFree(t *testing.T) {
|
||||
var msg string
|
||||
func() {
|
||||
defer func() {
|
||||
msg = fmt.Sprintf("%s", recover())
|
||||
}()
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("foo"))
|
||||
|
||||
// Corrupt the DB by adding a page to the freelist.
|
||||
db.freelist.free(0, tx.page(3))
|
||||
|
||||
return nil
|
||||
})
|
||||
})
|
||||
}()
|
||||
|
||||
assert.Equal(t, "assertion failed: page 3 already freed", msg)
|
||||
equals(t, 14, diff.FreePageN)
|
||||
}
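The Sub test above pins down the contract used for monitoring: transactional counters subtract, while FreePageN is copied from the receiver rather than diffed. A minimal sketch of sampling stats over an interval, assuming an already-open *bolt.DB named db with fmt and time imported; the 10-second window is illustrative only:

// Sketch only: sample stats twice and diff them with Stats.Sub.
prev := db.Stats()
time.Sleep(10 * time.Second) // illustrative interval
cur := db.Stats()
delta := cur.Sub(&prev)
fmt.Println("pages allocated in window:", delta.TxStats.PageCount)
fmt.Println("free pages now (copied, not diffed):", delta.FreePageN)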
|
||||
|
||||
func ExampleDB_Update() {
|
||||
// Open the database.
|
||||
db, _ := Open(tempfile(), 0666, nil)
|
||||
db, _ := bolt.Open(tempfile(), 0666, nil)
|
||||
defer os.Remove(db.Path())
|
||||
defer db.Close()
|
||||
|
||||
// Execute several commands within a write transaction.
|
||||
err := db.Update(func(tx *Tx) error {
|
||||
err := db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
if err != nil {
|
||||
return err
|
||||
@ -500,7 +441,7 @@ func ExampleDB_Update() {
|
||||
|
||||
// If our transactional block didn't return an error then our data is saved.
|
||||
if err == nil {
|
||||
db.View(func(tx *Tx) error {
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
|
||||
fmt.Printf("The value of 'foo' is: %s\n", value)
|
||||
return nil
|
||||
@ -513,12 +454,12 @@ func ExampleDB_Update() {
|
||||
|
||||
func ExampleDB_View() {
|
||||
// Open the database.
|
||||
db, _ := Open(tempfile(), 0666, nil)
|
||||
db, _ := bolt.Open(tempfile(), 0666, nil)
|
||||
defer os.Remove(db.Path())
|
||||
defer db.Close()
|
||||
|
||||
// Insert data into a bucket.
|
||||
db.Update(func(tx *Tx) error {
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("people"))
|
||||
b := tx.Bucket([]byte("people"))
|
||||
b.Put([]byte("john"), []byte("doe"))
|
||||
@ -527,7 +468,7 @@ func ExampleDB_View() {
|
||||
})
|
||||
|
||||
// Access data from within a read-only transactional block.
|
||||
db.View(func(tx *Tx) error {
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
v := tx.Bucket([]byte("people")).Get([]byte("john"))
|
||||
fmt.Printf("John's last name is %s.\n", v)
|
||||
return nil
|
||||
@ -539,12 +480,12 @@ func ExampleDB_View() {
|
||||
|
||||
func ExampleDB_Begin_ReadOnly() {
|
||||
// Open the database.
|
||||
db, _ := Open(tempfile(), 0666, nil)
|
||||
db, _ := bolt.Open(tempfile(), 0666, nil)
|
||||
defer os.Remove(db.Path())
|
||||
defer db.Close()
|
||||
|
||||
// Create a bucket.
|
||||
db.Update(func(tx *Tx) error {
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
})
|
||||
@ -571,44 +512,54 @@ func ExampleDB_Begin_ReadOnly() {
|
||||
// zephyr likes purple
|
||||
}
|
||||
|
||||
// tempfile returns a temporary file path.
|
||||
func tempfile() string {
|
||||
f, _ := ioutil.TempFile("", "bolt-")
|
||||
f.Close()
|
||||
os.Remove(f.Name())
|
||||
return f.Name()
|
||||
// TestDB represents a wrapper around a Bolt DB to handle temporary file
|
||||
// creation and automatic cleanup on close.
|
||||
type TestDB struct {
|
||||
*bolt.DB
|
||||
}
|
||||
|
||||
// withTempPath executes a function with a database reference.
|
||||
func withTempPath(fn func(string)) {
|
||||
path := tempfile()
|
||||
defer os.RemoveAll(path)
|
||||
fn(path)
|
||||
// NewTestDB returns a new instance of TestDB.
|
||||
func NewTestDB() *TestDB {
|
||||
db, err := bolt.Open(tempfile(), 0666, nil)
|
||||
if err != nil {
|
||||
panic("cannot open db: " + err.Error())
|
||||
}
|
||||
return &TestDB{db}
|
||||
}
|
||||
|
||||
// withOpenDB executes a function with an already opened database.
|
||||
func withOpenDB(fn func(*DB, string)) {
|
||||
withTempPath(func(path string) {
|
||||
db, err := Open(path, 0666, nil)
|
||||
if err != nil {
|
||||
panic("cannot open db: " + err.Error())
|
||||
}
|
||||
defer db.Close()
|
||||
fn(db, path)
|
||||
// Close closes the database and deletes the underlying file.
|
||||
func (db *TestDB) Close() {
|
||||
// Log statistics.
|
||||
if *statsFlag {
|
||||
db.PrintStats()
|
||||
}
|
||||
|
||||
// Log statistics.
|
||||
if *statsFlag {
|
||||
logStats(db)
|
||||
}
|
||||
// Check database consistency after every test.
|
||||
db.MustCheck()
|
||||
|
||||
// Check database consistency after every test.
|
||||
mustCheck(db)
|
||||
})
|
||||
// Close database and remove file.
|
||||
defer os.Remove(db.Path())
|
||||
db.DB.Close()
|
||||
}
|
||||
|
||||
// mustCheck runs a consistency check on the database and panics if any errors are found.
|
||||
func mustCheck(db *DB) {
|
||||
db.View(func(tx *Tx) error {
|
||||
// PrintStats prints the database stats
|
||||
func (db *TestDB) PrintStats() {
|
||||
var stats = db.Stats()
|
||||
fmt.Printf("[db] %-20s %-20s %-20s\n",
|
||||
fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
|
||||
fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
|
||||
fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
|
||||
)
|
||||
fmt.Printf(" %-20s %-20s %-20s\n",
|
||||
fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
|
||||
fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
|
||||
fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
|
||||
)
|
||||
}
|
||||
|
||||
// MustCheck runs a consistency check on the database and panics if any errors are found.
|
||||
func (db *TestDB) MustCheck() {
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Collect all the errors.
|
||||
var errors []error
|
||||
for err := range tx.Check() {
|
||||
@ -640,8 +591,23 @@ func mustCheck(db *DB) {
|
||||
})
|
||||
}
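MustCheck above drains tx.Check, which streams consistency errors over a channel. A minimal sketch of consuming that channel directly, assuming an open *bolt.DB named db and the standard log package:

// Sketch only: report consistency errors instead of panicking.
db.View(func(tx *bolt.Tx) error {
	for err := range tx.Check() {
		log.Println("consistency error:", err)
	}
	return nil
})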
|
||||
|
||||
// CopyTempFile copies a database to a temporary file.
|
||||
func (db *TestDB) CopyTempFile() {
|
||||
path := tempfile()
|
||||
db.View(func(tx *bolt.Tx) error { return tx.CopyFile(path, 0600) })
|
||||
fmt.Println("db copied to: ", path)
|
||||
}
|
||||
|
||||
// tempfile returns a temporary file path.
|
||||
func tempfile() string {
|
||||
f, _ := ioutil.TempFile("", "bolt-")
|
||||
f.Close()
|
||||
os.Remove(f.Name())
|
||||
return f.Name()
|
||||
}
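Taken together, these helpers replace the old withTempPath/withOpenDB closures. A minimal sketch of a test written in the new style, assuming the bolt_test package and the ok/equals helpers used throughout this diff; the bucket and key names are illustrative only:

// Sketch only: the typical shape of a test against TestDB.
func TestUsageSketch(t *testing.T) {
	db := NewTestDB()
	defer db.Close() // runs MustCheck and removes the temp file

	db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		ok(t, err)
		return b.Put([]byte("foo"), []byte("bar"))
	})
	db.View(func(tx *bolt.Tx) error {
		equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
		return nil
	})
}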
|
||||
|
||||
// mustContainKeys checks that a bucket contains a given set of keys.
|
||||
func mustContainKeys(b *Bucket, m map[string]string) {
|
||||
func mustContainKeys(b *bolt.Bucket, m map[string]string) {
|
||||
found := make(map[string]string)
|
||||
b.ForEach(func(k, _ []byte) error {
|
||||
found[string(k)] = ""
|
||||
@ -679,29 +645,6 @@ func trunc(b []byte, length int) []byte {
|
||||
return b
|
||||
}
|
||||
|
||||
// writes the current database stats to the testing log.
|
||||
func logStats(db *DB) {
|
||||
var stats = db.Stats()
|
||||
fmt.Printf("[db] %-20s %-20s %-20s\n",
|
||||
fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
|
||||
fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
|
||||
fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
|
||||
)
|
||||
fmt.Printf(" %-20s %-20s %-20s\n",
|
||||
fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
|
||||
fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
|
||||
fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
|
||||
)
|
||||
}
|
||||
|
||||
func truncDuration(d time.Duration) string {
|
||||
return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
|
||||
}
|
||||
|
||||
// copyAndFailNow copies a database to a new location and then fails the test.
|
||||
func copyAndFailNow(t *testing.T, db *DB) {
|
||||
path := tempfile()
|
||||
db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) })
|
||||
fmt.Println("db copied to: ", path)
|
||||
t.FailNow()
|
||||
}
|
||||
|
@ -1,24 +1,27 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Ensure that a page is added to a transaction's freelist.
|
||||
func TestFreelist_free(t *testing.T) {
|
||||
f := newFreelist()
|
||||
f.free(100, &page{id: 12})
|
||||
assert.Equal(t, f.pending[100], []pgid{12})
|
||||
if !reflect.DeepEqual([]pgid{12}, f.pending[100]) {
|
||||
t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100])
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a page and its overflow is added to a transaction's freelist.
|
||||
func TestFreelist_free_overflow(t *testing.T) {
|
||||
f := newFreelist()
|
||||
f.free(100, &page{id: 12, overflow: 3})
|
||||
assert.Equal(t, f.pending[100], []pgid{12, 13, 14, 15})
|
||||
if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.pending[100])
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a transaction's free pages can be released.
|
||||
@ -29,25 +32,56 @@ func TestFreelist_release(t *testing.T) {
|
||||
f.free(102, &page{id: 39})
|
||||
f.release(100)
|
||||
f.release(101)
|
||||
assert.Equal(t, []pgid{9, 12, 13}, f.ids)
|
||||
if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.ids)
|
||||
}
|
||||
|
||||
f.release(102)
|
||||
assert.Equal(t, []pgid{9, 12, 13, 39}, f.ids)
|
||||
if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.ids)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a freelist can find contiguous blocks of pages.
|
||||
func TestFreelist_allocate(t *testing.T) {
|
||||
f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
|
||||
assert.Equal(t, 3, int(f.allocate(3)))
|
||||
assert.Equal(t, 6, int(f.allocate(1)))
|
||||
assert.Equal(t, 0, int(f.allocate(3)))
|
||||
assert.Equal(t, 12, int(f.allocate(2)))
|
||||
assert.Equal(t, 7, int(f.allocate(1)))
|
||||
assert.Equal(t, 0, int(f.allocate(0)))
|
||||
assert.Equal(t, []pgid{9, 18}, f.ids)
|
||||
assert.Equal(t, 9, int(f.allocate(1)))
|
||||
assert.Equal(t, 18, int(f.allocate(1)))
|
||||
assert.Equal(t, 0, int(f.allocate(1)))
|
||||
assert.Equal(t, []pgid{}, f.ids)
|
||||
if id := int(f.allocate(3)); id != 3 {
|
||||
t.Fatalf("exp=3; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(1)); id != 6 {
|
||||
t.Fatalf("exp=6; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(3)); id != 0 {
|
||||
t.Fatalf("exp=0; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(2)); id != 12 {
|
||||
t.Fatalf("exp=12; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(1)); id != 7 {
|
||||
t.Fatalf("exp=7; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(0)); id != 0 {
|
||||
t.Fatalf("exp=0; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(0)); id != 0 {
|
||||
t.Fatalf("exp=0; got=%v", id)
|
||||
}
|
||||
if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.ids)
|
||||
}
|
||||
|
||||
if id := int(f.allocate(1)); id != 9 {
|
||||
t.Fatalf("exp=9; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(1)); id != 18 {
|
||||
t.Fatalf("exp=18; got=%v", id)
|
||||
}
|
||||
if id := int(f.allocate(1)); id != 0 {
|
||||
t.Fatalf("exp=0; got=%v", id)
|
||||
}
|
||||
if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.ids)
|
||||
}
|
||||
}
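The allocate test above encodes the contract: allocate(n) returns the starting id of the first run of n contiguous free pages and removes that run from f.ids, or returns 0 when no such run exists. A compact sketch using the internal types exercised by the test:

// Sketch only (package bolt internals, as in the test above).
f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
start := f.allocate(3) // start == 3; ids 3, 4, 5 are removed from f.ids
_ = start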
|
||||
|
||||
// Ensure that a freelist can deserialize from a freelist page.
|
||||
@ -68,9 +102,9 @@ func TestFreelist_read(t *testing.T) {
|
||||
f.read(page)
|
||||
|
||||
// Ensure that there are two page ids in the freelist.
|
||||
assert.Equal(t, len(f.ids), 2)
|
||||
assert.Equal(t, f.ids[0], pgid(23))
|
||||
assert.Equal(t, f.ids[1], pgid(50))
|
||||
if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f.ids)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a freelist can serialize into a freelist page.
|
||||
@ -89,10 +123,7 @@ func TestFreelist_write(t *testing.T) {
|
||||
|
||||
// Ensure that the freelist is correct.
|
||||
// All pages should be present and in reverse order.
|
||||
assert.Equal(t, len(f2.ids), 5)
|
||||
assert.Equal(t, f2.ids[0], pgid(3))
|
||||
assert.Equal(t, f2.ids[1], pgid(11))
|
||||
assert.Equal(t, f2.ids[2], pgid(12))
|
||||
assert.Equal(t, f2.ids[3], pgid(28))
|
||||
assert.Equal(t, f2.ids[4], pgid(39))
|
||||
if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
|
||||
t.Fatalf("exp=%v; got=%v", exp, f2.ids)
|
||||
}
|
||||
}
|
||||
|
4
node.go
@ -337,7 +337,7 @@ func (n *node) spill() error {
for _, node := range nodes {
// Add node's page to the freelist if it's not new.
if node.pgid > 0 {
tx.db.freelist.free(tx.id(), tx.page(node.pgid))
tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
node.pgid = 0
}

@ -565,7 +565,7 @@ func (n *node) dereference() {
// free adds the node's underlying page to the freelist.
func (n *node) free() {
if n.pgid != 0 {
n.bucket.tx.db.freelist.free(n.bucket.tx.id(), n.bucket.tx.page(n.pgid))
n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
n.pgid = 0
}
}
83
node_test.go
@ -3,8 +3,6 @@ package bolt
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Ensure that a node can insert a key/value.
|
||||
@ -14,14 +12,22 @@ func TestNode_put(t *testing.T) {
|
||||
n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
|
||||
n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
|
||||
n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)
|
||||
assert.Equal(t, len(n.inodes), 3)
|
||||
assert.Equal(t, n.inodes[0].key, []byte("bar"))
|
||||
assert.Equal(t, n.inodes[0].value, []byte("1"))
|
||||
assert.Equal(t, n.inodes[1].key, []byte("baz"))
|
||||
assert.Equal(t, n.inodes[1].value, []byte("2"))
|
||||
assert.Equal(t, n.inodes[2].key, []byte("foo"))
|
||||
assert.Equal(t, n.inodes[2].value, []byte("3"))
|
||||
assert.Equal(t, n.inodes[2].flags, uint32(leafPageFlag))
|
||||
|
||||
if len(n.inodes) != 3 {
|
||||
t.Fatalf("exp=3; got=%d", len(n.inodes))
|
||||
}
|
||||
if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" {
|
||||
t.Fatalf("exp=<bar,1>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" {
|
||||
t.Fatalf("exp=<baz,2>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" {
|
||||
t.Fatalf("exp=<foo,3>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if n.inodes[2].flags != uint32(leafPageFlag) {
|
||||
t.Fatalf("not a leaf: %d", n.inodes[2].flags)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a node can deserialize from a leaf page.
|
||||
@ -47,12 +53,18 @@ func TestNode_read_LeafPage(t *testing.T) {
|
||||
n.read(page)
|
||||
|
||||
// Check that there are two inodes with correct data.
|
||||
assert.True(t, n.isLeaf)
|
||||
assert.Equal(t, len(n.inodes), 2)
|
||||
assert.Equal(t, n.inodes[0].key, []byte("bar"))
|
||||
assert.Equal(t, n.inodes[0].value, []byte("fooz"))
|
||||
assert.Equal(t, n.inodes[1].key, []byte("helloworld"))
|
||||
assert.Equal(t, n.inodes[1].value, []byte("bye"))
|
||||
if !n.isLeaf {
|
||||
t.Fatal("expected leaf")
|
||||
}
|
||||
if len(n.inodes) != 2 {
|
||||
t.Fatalf("exp=2; got=%d", len(n.inodes))
|
||||
}
|
||||
if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" {
|
||||
t.Fatalf("exp=<bar,fooz>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" {
|
||||
t.Fatalf("exp=<helloworld,bye>; got=<%s,%s>", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a node can serialize into a leaf page.
|
||||
@ -73,13 +85,18 @@ func TestNode_write_LeafPage(t *testing.T) {
|
||||
n2.read(p)
|
||||
|
||||
// Check that the two pages are the same.
|
||||
assert.Equal(t, len(n2.inodes), 3)
|
||||
assert.Equal(t, n2.inodes[0].key, []byte("john"))
|
||||
assert.Equal(t, n2.inodes[0].value, []byte("johnson"))
|
||||
assert.Equal(t, n2.inodes[1].key, []byte("ricki"))
|
||||
assert.Equal(t, n2.inodes[1].value, []byte("lake"))
|
||||
assert.Equal(t, n2.inodes[2].key, []byte("susy"))
|
||||
assert.Equal(t, n2.inodes[2].value, []byte("que"))
|
||||
if len(n2.inodes) != 3 {
|
||||
t.Fatalf("exp=3; got=%d", len(n2.inodes))
|
||||
}
|
||||
if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" {
|
||||
t.Fatalf("exp=<john,johnson>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" {
|
||||
t.Fatalf("exp=<ricki,lake>; got=<%s,%s>", k, v)
|
||||
}
|
||||
if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" {
|
||||
t.Fatalf("exp=<susy,que>; got=<%s,%s>", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a node can split into appropriate subgroups.
|
||||
@ -96,9 +113,15 @@ func TestNode_split(t *testing.T) {
|
||||
n.split(100)
|
||||
|
||||
var parent = n.parent
|
||||
assert.Equal(t, len(parent.children), 2)
|
||||
assert.Equal(t, len(parent.children[0].inodes), 2)
|
||||
assert.Equal(t, len(parent.children[1].inodes), 3)
|
||||
if len(parent.children) != 2 {
|
||||
t.Fatalf("exp=2; got=%d", len(parent.children))
|
||||
}
|
||||
if len(parent.children[0].inodes) != 2 {
|
||||
t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes))
|
||||
}
|
||||
if len(parent.children[1].inodes) != 3 {
|
||||
t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes))
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a page with the minimum number of inodes just returns a single node.
|
||||
@ -110,7 +133,9 @@ func TestNode_split_MinKeys(t *testing.T) {
|
||||
|
||||
// Split.
|
||||
n.split(20)
|
||||
assert.Nil(t, n.parent)
|
||||
if n.parent != nil {
|
||||
t.Fatalf("expected nil parent")
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure that a node that has keys that all fit on a page just returns one leaf.
|
||||
@ -125,5 +150,7 @@ func TestNode_split_SinglePage(t *testing.T) {
|
||||
|
||||
// Split.
|
||||
n.split(4096)
|
||||
assert.Nil(t, n.parent)
|
||||
if n.parent != nil {
|
||||
t.Fatalf("expected nil parent")
|
||||
}
|
||||
}
|
||||
|
21
page_test.go
@ -1,17 +1,26 @@
package bolt

import (
"github.com/stretchr/testify/assert"
"testing"
)

// Ensure that the page type can be returned in human readable format.
func TestPage_typ(t *testing.T) {
assert.Equal(t, (&page{flags: branchPageFlag}).typ(), "branch")
assert.Equal(t, (&page{flags: leafPageFlag}).typ(), "leaf")
assert.Equal(t, (&page{flags: metaPageFlag}).typ(), "meta")
assert.Equal(t, (&page{flags: freelistPageFlag}).typ(), "freelist")
assert.Equal(t, (&page{flags: 20000}).typ(), "unknown<4e20>")
if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" {
t.Fatalf("exp=branch; got=%v", typ)
}
if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" {
t.Fatalf("exp=leaf; got=%v", typ)
}
if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" {
t.Fatalf("exp=meta; got=%v", typ)
}
if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" {
t.Fatalf("exp=freelist; got=%v", typ)
}
if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" {
t.Fatalf("exp=unknown<4e20>; got=%v", typ)
}
}

// Ensure that the hexdump debugging function doesn't blow up.
@ -1,9 +1,11 @@
package bolt
package bolt_test

import (
"bytes"
"flag"
"fmt"
"math/rand"
"os"
"reflect"
"testing/quick"
"time"
@ -28,8 +30,8 @@ func init() {
flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "")
flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "")
flag.Parse()
warn("seed:", qseed)
warnf("quick settings: count=%v, items=%v, ksize=%v, vsize=%v", qcount, qmaxitems, qmaxksize, qmaxvsize)
fmt.Fprintln(os.Stderr, "seed:", qseed)
fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize)
}

func qconfig() *quick.Config {
@ -1,4 +1,4 @@
|
||||
package bolt
|
||||
package bolt_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@ -7,7 +7,7 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) }
|
||||
@ -39,86 +39,88 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
|
||||
var readerHandlers = []simulateHandler{simulateGetHandler}
|
||||
var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler}
|
||||
|
||||
var versions = make(map[txid]*QuickDB)
|
||||
var versions = make(map[int]*QuickDB)
|
||||
versions[1] = NewQuickDB()
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
var mutex sync.Mutex
|
||||
|
||||
// Run n threads in parallel, each with their own operation.
|
||||
var wg sync.WaitGroup
|
||||
var threads = make(chan bool, parallelism)
|
||||
var i int
|
||||
for {
|
||||
threads <- true
|
||||
wg.Add(1)
|
||||
writable := ((rand.Int() % 100) < 20) // 20% writers
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
|
||||
// Choose an operation to execute.
|
||||
var handler simulateHandler
|
||||
if writable {
|
||||
handler = writerHandlers[rand.Intn(len(writerHandlers))]
|
||||
} else {
|
||||
handler = readerHandlers[rand.Intn(len(readerHandlers))]
|
||||
}
|
||||
var mutex sync.Mutex
|
||||
|
||||
// Execute a thread for the given operation.
|
||||
go func(writable bool, handler simulateHandler) {
|
||||
defer wg.Done()
|
||||
// Run n threads in parallel, each with their own operation.
|
||||
var wg sync.WaitGroup
|
||||
var threads = make(chan bool, parallelism)
|
||||
var i int
|
||||
for {
|
||||
threads <- true
|
||||
wg.Add(1)
|
||||
writable := ((rand.Int() % 100) < 20) // 20% writers
|
||||
|
||||
// Start transaction.
|
||||
tx, err := db.Begin(writable)
|
||||
if err != nil {
|
||||
t.Fatal("tx begin: ", err)
|
||||
}
|
||||
|
||||
// Obtain current state of the dataset.
|
||||
mutex.Lock()
|
||||
var qdb = versions[tx.id()]
|
||||
if writable {
|
||||
qdb = versions[tx.id()-1].Copy()
|
||||
}
|
||||
mutex.Unlock()
|
||||
|
||||
// Make sure we commit/rollback the tx at the end and update the state.
|
||||
if writable {
|
||||
defer func() {
|
||||
mutex.Lock()
|
||||
versions[tx.id()] = qdb
|
||||
mutex.Unlock()
|
||||
|
||||
assert.NoError(t, tx.Commit())
|
||||
}()
|
||||
} else {
|
||||
defer tx.Rollback()
|
||||
}
|
||||
|
||||
// Ignore operation if we don't have data yet.
|
||||
if qdb == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Execute handler.
|
||||
handler(tx, qdb)
|
||||
|
||||
// Release a thread back to the scheduling loop.
|
||||
<-threads
|
||||
}(writable, handler)
|
||||
|
||||
i++
|
||||
if i > threadCount {
|
||||
break
|
||||
}
|
||||
// Choose an operation to execute.
|
||||
var handler simulateHandler
|
||||
if writable {
|
||||
handler = writerHandlers[rand.Intn(len(writerHandlers))]
|
||||
} else {
|
||||
handler = readerHandlers[rand.Intn(len(readerHandlers))]
|
||||
}
|
||||
|
||||
// Wait until all threads are done.
|
||||
wg.Wait()
|
||||
})
|
||||
// Execute a thread for the given operation.
|
||||
go func(writable bool, handler simulateHandler) {
|
||||
defer wg.Done()
|
||||
|
||||
// Start transaction.
|
||||
tx, err := db.Begin(writable)
|
||||
if err != nil {
|
||||
t.Fatal("tx begin: ", err)
|
||||
}
|
||||
|
||||
// Obtain current state of the dataset.
|
||||
mutex.Lock()
|
||||
var qdb = versions[tx.ID()]
|
||||
if writable {
|
||||
qdb = versions[tx.ID()-1].Copy()
|
||||
}
|
||||
mutex.Unlock()
|
||||
|
||||
// Make sure we commit/rollback the tx at the end and update the state.
|
||||
if writable {
|
||||
defer func() {
|
||||
mutex.Lock()
|
||||
versions[tx.ID()] = qdb
|
||||
mutex.Unlock()
|
||||
|
||||
ok(t, tx.Commit())
|
||||
}()
|
||||
} else {
|
||||
defer tx.Rollback()
|
||||
}
|
||||
|
||||
// Ignore operation if we don't have data yet.
|
||||
if qdb == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Execute handler.
|
||||
handler(tx, qdb)
|
||||
|
||||
// Release a thread back to the scheduling loop.
|
||||
<-threads
|
||||
}(writable, handler)
|
||||
|
||||
i++
|
||||
if i > threadCount {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Wait until all threads are done.
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
type simulateHandler func(tx *Tx, qdb *QuickDB)
|
||||
type simulateHandler func(tx *bolt.Tx, qdb *QuickDB)
|
||||
|
||||
// Retrieves a key from the database and verifies that it is what is expected.
|
||||
func simulateGetHandler(tx *Tx, qdb *QuickDB) {
|
||||
func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) {
|
||||
// Randomly retrieve an existing key.
|
||||
keys := qdb.Rand()
|
||||
if len(keys) == 0 {
|
||||
@ -153,7 +155,7 @@ func simulateGetHandler(tx *Tx, qdb *QuickDB) {
|
||||
}
|
||||
|
||||
// Inserts a key into the database.
|
||||
func simulatePutHandler(tx *Tx, qdb *QuickDB) {
|
||||
func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) {
|
||||
var err error
|
||||
keys, value := randKeys(), randValue()
|
||||
|
||||
|
10
tx.go
@ -52,9 +52,9 @@ func (tx *Tx) init(db *DB) {
}
}

// id returns the transaction id.
func (tx *Tx) id() txid {
return tx.meta.txid
// ID returns the transaction id.
func (tx *Tx) ID() int {
return int(tx.meta.txid)
}

// DB returns a reference to the database that created the transaction.
@ -158,7 +158,7 @@ func (tx *Tx) Commit() error {

// Free the freelist and allocate new pages for it. This will overestimate
// the size of the freelist but not underestimate the size (which would be bad).
tx.db.freelist.free(tx.id(), tx.db.page(tx.meta.freelist))
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
if err != nil {
tx.rollback()
@ -218,7 +218,7 @@ func (tx *Tx) rollback() {
return
}
if tx.writable {
tx.db.freelist.rollback(tx.id())
tx.db.freelist.rollback(tx.meta.txid)
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
}
tx.close()
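With the unexported id() replaced by the exported ID() accessor (internal callers now read tx.meta.txid directly), external packages such as these tests can key per-transaction state by id, as the simulation test does with its versions map. A minimal sketch, assuming an open *bolt.DB named db and the standard fmt and log packages; the bucket name is illustrative only:

// Sketch only: observe the transaction id from outside the bolt package.
err := db.Update(func(tx *bolt.Tx) error {
	fmt.Printf("writing in txid %d\n", tx.ID())
	_, err := tx.CreateBucketIfNotExists([]byte("log"))
	return err
})
if err != nil {
	log.Fatal(err)
}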
446
tx_test.go
@ -1,4 +1,4 @@
|
||||
package bolt
|
||||
package bolt_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@ -6,310 +6,302 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// Ensure that committing a closed transaction returns an error.
|
||||
func TestTx_Commit_Closed(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
tx, _ := db.Begin(true)
|
||||
tx.CreateBucket([]byte("foo"))
|
||||
assert.NoError(t, tx.Commit())
|
||||
assert.Equal(t, tx.Commit(), ErrTxClosed)
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
tx, _ := db.Begin(true)
|
||||
tx.CreateBucket([]byte("foo"))
|
||||
ok(t, tx.Commit())
|
||||
equals(t, tx.Commit(), bolt.ErrTxClosed)
|
||||
}
|
||||
|
||||
// Ensure that rolling back a closed transaction returns an error.
|
||||
func TestTx_Rollback_Closed(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
tx, _ := db.Begin(true)
|
||||
assert.NoError(t, tx.Rollback())
|
||||
assert.Equal(t, tx.Rollback(), ErrTxClosed)
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
tx, _ := db.Begin(true)
|
||||
ok(t, tx.Rollback())
|
||||
equals(t, tx.Rollback(), bolt.ErrTxClosed)
|
||||
}
|
||||
|
||||
// Ensure that committing a read-only transaction returns an error.
|
||||
func TestTx_Commit_ReadOnly(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
tx, _ := db.Begin(false)
|
||||
assert.Equal(t, tx.Commit(), ErrTxNotWritable)
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
tx, _ := db.Begin(false)
|
||||
equals(t, tx.Commit(), bolt.ErrTxNotWritable)
|
||||
}
|
||||
|
||||
// Ensure that a transaction can retrieve a cursor on the root bucket.
|
||||
func TestTx_Cursor(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.CreateBucket([]byte("woojits"))
|
||||
c := tx.Cursor()
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.CreateBucket([]byte("woojits"))
|
||||
c := tx.Cursor()
|
||||
|
||||
k, v := c.First()
|
||||
assert.Equal(t, "widgets", string(k))
|
||||
assert.Nil(t, v)
|
||||
k, v := c.First()
|
||||
equals(t, "widgets", string(k))
|
||||
assert(t, v == nil, "")
|
||||
|
||||
k, v = c.Next()
|
||||
assert.Equal(t, "woojits", string(k))
|
||||
assert.Nil(t, v)
|
||||
k, v = c.Next()
|
||||
equals(t, "woojits", string(k))
|
||||
assert(t, v == nil, "")
|
||||
|
||||
k, v = c.Next()
|
||||
assert.Nil(t, k)
|
||||
assert.Nil(t, v)
|
||||
k, v = c.Next()
|
||||
assert(t, k == nil, "")
|
||||
assert(t, v == nil, "")
|
||||
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that creating a bucket with a read-only transaction returns an error.
|
||||
func TestTx_CreateBucket_ReadOnly(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.View(func(tx *Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("foo"))
|
||||
assert.Nil(t, b)
|
||||
assert.Equal(t, ErrTxNotWritable, err)
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("foo"))
|
||||
assert(t, b == nil, "")
|
||||
equals(t, bolt.ErrTxNotWritable, err)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that creating a bucket on a closed transaction returns an error.
|
||||
func TestTx_CreateBucket_Closed(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
tx, _ := db.Begin(true)
|
||||
tx.Commit()
|
||||
b, err := tx.CreateBucket([]byte("foo"))
|
||||
assert.Nil(t, b)
|
||||
assert.Equal(t, ErrTxClosed, err)
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
tx, _ := db.Begin(true)
|
||||
tx.Commit()
|
||||
b, err := tx.CreateBucket([]byte("foo"))
|
||||
assert(t, b == nil, "")
|
||||
equals(t, bolt.ErrTxClosed, err)
|
||||
}
|
||||
|
||||
// Ensure that a Tx can retrieve a bucket.
|
||||
func TestTx_Bucket(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
b := tx.Bucket([]byte("widgets"))
|
||||
assert.NotNil(t, b)
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
b := tx.Bucket([]byte("widgets"))
|
||||
assert(t, b != nil, "")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that a Tx retrieving a non-existent key returns nil.
|
||||
func TestTx_Get_Missing(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
|
||||
assert.Nil(t, value)
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
|
||||
assert(t, value == nil, "")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that a bucket can be created and retrieved.
|
||||
func TestTx_CreateBucket(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
// Create a bucket.
|
||||
db.Update(func(tx *Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
assert.NotNil(t, b)
|
||||
assert.NoError(t, err)
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
|
||||
// Read the bucket through a separate transaction.
|
||||
db.View(func(tx *Tx) error {
|
||||
b := tx.Bucket([]byte("widgets"))
|
||||
assert.NotNil(t, b)
|
||||
return nil
|
||||
})
|
||||
// Create a bucket.
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
assert(t, b != nil, "")
|
||||
ok(t, err)
|
||||
return nil
|
||||
})
|
||||
|
||||
// Read the bucket through a separate transaction.
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("widgets"))
|
||||
assert(t, b != nil, "")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that a bucket can be created if it doesn't already exist.
|
||||
func TestTx_CreateBucketIfNotExists(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
|
||||
assert.NotNil(t, b)
|
||||
assert.NoError(t, err)
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
|
||||
assert(t, b != nil, "")
|
||||
ok(t, err)
|
||||
|
||||
b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
|
||||
assert.NotNil(t, b)
|
||||
assert.NoError(t, err)
|
||||
b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
|
||||
assert(t, b != nil, "")
|
||||
ok(t, err)
|
||||
|
||||
b, err = tx.CreateBucketIfNotExists([]byte{})
|
||||
assert.Nil(t, b)
|
||||
assert.Equal(t, ErrBucketNameRequired, err)
|
||||
b, err = tx.CreateBucketIfNotExists([]byte{})
|
||||
assert(t, b == nil, "")
|
||||
equals(t, bolt.ErrBucketNameRequired, err)
|
||||
|
||||
b, err = tx.CreateBucketIfNotExists(nil)
|
||||
assert.Nil(t, b)
|
||||
assert.Equal(t, ErrBucketNameRequired, err)
|
||||
return nil
|
||||
})
|
||||
b, err = tx.CreateBucketIfNotExists(nil)
|
||||
assert(t, b == nil, "")
|
||||
equals(t, bolt.ErrBucketNameRequired, err)
|
||||
return nil
|
||||
})
|
||||
|
||||
// Read the bucket through a separate transaction.
|
||||
db.View(func(tx *Tx) error {
|
||||
b := tx.Bucket([]byte("widgets"))
|
||||
assert.NotNil(t, b)
|
||||
return nil
|
||||
})
|
||||
// Read the bucket through a separate transaction.
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("widgets"))
|
||||
assert(t, b != nil, "")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that a bucket cannot be created twice.
|
||||
func TestTx_CreateBucket_Exists(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
// Create a bucket.
|
||||
db.Update(func(tx *Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
assert.NotNil(t, b)
|
||||
assert.NoError(t, err)
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
// Create a bucket.
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
assert(t, b != nil, "")
|
||||
ok(t, err)
|
||||
return nil
|
||||
})
|
||||
|
||||
// Create the same bucket again.
|
||||
db.Update(func(tx *Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
assert.Nil(t, b)
|
||||
assert.Equal(t, ErrBucketExists, err)
|
||||
return nil
|
||||
})
|
||||
// Create the same bucket again.
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
assert(t, b == nil, "")
|
||||
equals(t, bolt.ErrBucketExists, err)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that a bucket is created with a non-blank name.
|
||||
func TestTx_CreateBucket_NameRequired(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
b, err := tx.CreateBucket(nil)
|
||||
assert.Nil(t, b)
|
||||
assert.Equal(t, ErrBucketNameRequired, err)
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket(nil)
|
||||
assert(t, b == nil, "")
|
||||
equals(t, bolt.ErrBucketNameRequired, err)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that a bucket can be deleted.
|
||||
func TestTx_DeleteBucket(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
// Create a bucket and add a value.
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
|
||||
// Save root page id.
|
||||
var root pgid
|
||||
db.View(func(tx *Tx) error {
|
||||
root = tx.Bucket([]byte("widgets")).root
|
||||
return nil
|
||||
})
|
||||
// Create a bucket and add a value.
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
return nil
|
||||
})
|
||||
|
||||
// Delete the bucket and make sure we can't get the value.
|
||||
db.Update(func(tx *Tx) error {
|
||||
assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
|
||||
assert.Nil(t, tx.Bucket([]byte("widgets")))
|
||||
return nil
|
||||
})
|
||||
// Delete the bucket and make sure we can't get the value.
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
ok(t, tx.DeleteBucket([]byte("widgets")))
|
||||
assert(t, tx.Bucket([]byte("widgets")) == nil, "")
|
||||
return nil
|
||||
})
|
||||
|
||||
db.Update(func(tx *Tx) error {
|
||||
// Verify that the bucket's page is free.
|
||||
assert.Equal(t, []pgid{4, 5}, db.freelist.all())
|
||||
|
||||
// Create the bucket again and make sure there's not a phantom value.
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
assert.NotNil(t, b)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
|
||||
return nil
|
||||
})
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
// Create the bucket again and make sure there's not a phantom value.
|
||||
b, err := tx.CreateBucket([]byte("widgets"))
|
||||
assert(t, b != nil, "")
|
||||
ok(t, err)
|
||||
assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that deleting a bucket on a closed transaction returns an error.
|
||||
func TestTx_DeleteBucket_Closed(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
tx, _ := db.Begin(true)
|
||||
tx.Commit()
|
||||
assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxClosed)
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
tx, _ := db.Begin(true)
|
||||
tx.Commit()
|
||||
equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed)
|
||||
}
|
||||
|
||||
// Ensure that deleting a bucket with a read-only transaction returns an error.
|
||||
func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.View(func(tx *Tx) error {
|
||||
assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxNotWritable)
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that nothing happens when deleting a bucket that doesn't exist.
|
||||
func TestTx_DeleteBucket_NotFound(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that Tx commit handlers are called after a transaction successfully commits.
|
||||
func TestTx_OnCommit(t *testing.T) {
|
||||
var x int
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.OnCommit(func() { x += 1 })
|
||||
tx.OnCommit(func() { x += 2 })
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.OnCommit(func() { x += 1 })
|
||||
tx.OnCommit(func() { x += 2 })
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
})
|
||||
assert.Equal(t, 3, x)
|
||||
equals(t, 3, x)
|
||||
}
|
||||
|
||||
// Ensure that Tx commit handlers are NOT called after a transaction rolls back.
|
||||
func TestTx_OnCommit_Rollback(t *testing.T) {
|
||||
var x int
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.OnCommit(func() { x += 1 })
|
||||
tx.OnCommit(func() { x += 2 })
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
return errors.New("rollback this commit")
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.OnCommit(func() { x += 1 })
|
||||
tx.OnCommit(func() { x += 2 })
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
return errors.New("rollback this commit")
|
||||
})
|
||||
assert.Equal(t, 0, x)
|
||||
equals(t, 0, x)
|
||||
}
|
||||
|
||||
// Ensure that the database can be copied to a file path.
|
||||
func TestTx_CopyFile(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
var dest = tempfile()
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
|
||||
return nil
|
||||
})
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
var dest = tempfile()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
|
||||
return nil
|
||||
})
|
||||
|
||||
assert.NoError(t, db.View(func(tx *Tx) error { return tx.CopyFile(dest, 0600) }))
|
||||
ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) }))
|
||||
|
||||
db2, err := Open(dest, 0600, nil)
|
||||
assert.NoError(t, err)
|
||||
defer db2.Close()
|
||||
db2, err := bolt.Open(dest, 0600, nil)
|
||||
ok(t, err)
|
||||
defer db2.Close()
|
||||
|
||||
db2.View(func(tx *Tx) error {
|
||||
assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
|
||||
assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
|
||||
return nil
|
||||
})
|
||||
db2.View(func(tx *bolt.Tx) error {
|
||||
equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
|
||||
equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
@ -336,48 +328,48 @@ func (f *failWriter) Write(p []byte) (n int, err error) {
|
||||
|
||||
// Ensure that Copy handles write errors right.
|
||||
func TestTx_CopyFile_Error_Meta(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
|
||||
return nil
|
||||
})
|
||||
|
||||
err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{}) })
|
||||
assert.EqualError(t, err, "meta copy: error injected for tests")
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
|
||||
return nil
|
||||
})
|
||||
|
||||
err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) })
|
||||
equals(t, err.Error(), "meta copy: error injected for tests")
|
||||
}
|
||||
|
||||
// Ensure that Copy handles write errors right.
|
||||
func TestTx_CopyFile_Error_Normal(t *testing.T) {
|
||||
withOpenDB(func(db *DB, path string) {
|
||||
db.Update(func(tx *Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
|
||||
return nil
|
||||
})
|
||||
|
||||
err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{3 * db.pageSize}) })
|
||||
assert.EqualError(t, err, "error injected for tests")
|
||||
db := NewTestDB()
|
||||
defer db.Close()
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
|
||||
return nil
|
||||
})
|
||||
|
||||
err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) })
|
||||
equals(t, err.Error(), "error injected for tests")
|
||||
}
|
||||
|
||||
func ExampleTx_Rollback() {
|
||||
// Open the database.
|
||||
db, _ := Open(tempfile(), 0666, nil)
|
||||
db, _ := bolt.Open(tempfile(), 0666, nil)
|
||||
defer os.Remove(db.Path())
|
||||
defer db.Close()
|
||||
|
||||
// Create a bucket.
|
||||
db.Update(func(tx *Tx) error {
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucket([]byte("widgets"))
|
||||
return err
|
||||
})
|
||||
|
||||
// Set a value for a key.
|
||||
db.Update(func(tx *Tx) error {
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
})
|
||||
|
||||
@ -388,7 +380,7 @@ func ExampleTx_Rollback() {
|
||||
tx.Rollback()
|
||||
|
||||
// Ensure that our original value is still set.
|
||||
db.View(func(tx *Tx) error {
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
|
||||
fmt.Printf("The value for 'foo' is still: %s\n", value)
|
||||
return nil
|
||||
@ -400,12 +392,12 @@ func ExampleTx_Rollback() {
|
||||
|
||||
func ExampleTx_CopyFile() {
|
||||
// Open the database.
|
||||
db, _ := Open(tempfile(), 0666, nil)
|
||||
db, _ := bolt.Open(tempfile(), 0666, nil)
|
||||
defer os.Remove(db.Path())
|
||||
defer db.Close()
|
||||
|
||||
// Create a bucket and a key.
|
||||
db.Update(func(tx *Tx) error {
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
tx.CreateBucket([]byte("widgets"))
|
||||
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||
return nil
|
||||
@ -413,15 +405,15 @@ func ExampleTx_CopyFile() {
|
||||
|
||||
// Copy the database to another file.
|
||||
toFile := tempfile()
|
||||
db.View(func(tx *Tx) error { return tx.CopyFile(toFile, 0666) })
|
||||
db.View(func(tx *bolt.Tx) error { return tx.CopyFile(toFile, 0666) })
|
||||
defer os.Remove(toFile)
|
||||
|
||||
// Open the cloned database.
|
||||
db2, _ := Open(toFile, 0666, nil)
|
||||
db2, _ := bolt.Open(toFile, 0666, nil)
|
||||
defer db2.Close()
|
||||
|
||||
// Ensure that the key exists in the copy.
|
||||
db2.View(func(tx *Tx) error {
|
||||
db2.View(func(tx *bolt.Tx) error {
|
||||
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
|
||||
fmt.Printf("The value for 'foo' in the clone is: %s\n", value)
|
||||
return nil
|
||||
|