Remove testify.

pull/34/head
Ben Johnson 2014-07-26 17:17:03 -06:00
parent ba6badc57f
commit 44e6192d2b
17 changed files with 664 additions and 537 deletions

bolt_test.go (new file, +36 lines)

@ -0,0 +1,36 @@
package bolt_test

import (
	"fmt"
	"path/filepath"
	"reflect"
	"runtime"
	"testing"
)

// assert fails the test if the condition is false.
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
	if !condition {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
		tb.FailNow()
	}
}

// ok fails the test if an err is not nil.
func ok(tb testing.TB, err error) {
	if err != nil {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
		tb.FailNow()
	}
}

// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
	if !reflect.DeepEqual(exp, act) {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
		tb.FailNow()
	}
}
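
For reference, a hypothetical snippet (not part of this commit; the test name is made up) showing how the testify assertions replaced throughout this changeset map onto the new helpers — assert.NoError becomes ok, assert.Equal becomes equals, and assert.Nil/NotNil become assert with an explicit condition:

	func TestHelpers_Sketch(t *testing.T) {
		var err error
		ok(t, err) // was: assert.NoError(t, err)

		value := []byte("bar")
		equals(t, []byte("bar"), value) // was: assert.Equal(t, []byte("bar"), value)

		var missing []byte
		assert(t, missing == nil, "") // was: assert.Nil(t, missing); failures print in red and stop the test
	}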

View File

@ -13,7 +13,6 @@ import (
"testing/quick" "testing/quick"
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a bucket that gets a non-existent key returns nil. // Ensure that a bucket that gets a non-existent key returns nil.
@ -23,7 +22,7 @@ func TestBucket_Get_NonExistent(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
assert.Nil(t, value) assert(t, value == nil, "")
return nil return nil
}) })
} }
@ -37,7 +36,7 @@ func TestBucket_Get_FromNode(t *testing.T) {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
b.Put([]byte("foo"), []byte("bar")) b.Put([]byte("foo"), []byte("bar"))
value := b.Get([]byte("foo")) value := b.Get([]byte("foo"))
assert.Equal(t, value, []byte("bar")) equals(t, []byte("bar"), value)
return nil return nil
}) })
} }
@ -49,8 +48,8 @@ func TestBucket_Get_IncompatibleValue(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo"))) assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
return nil return nil
}) })
} }
@ -62,9 +61,9 @@ func TestBucket_Put(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
assert.NoError(t, err) ok(t, err)
value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
assert.Equal(t, value, []byte("bar")) equals(t, value, []byte("bar"))
return nil return nil
}) })
} }
@ -76,10 +75,10 @@ func TestBucket_Put_Repeat(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
assert.NoError(t, b.Put([]byte("foo"), []byte("bar"))) ok(t, b.Put([]byte("foo"), []byte("bar")))
assert.NoError(t, b.Put([]byte("foo"), []byte("baz"))) ok(t, b.Put([]byte("foo"), []byte("baz")))
value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
assert.Equal(t, value, []byte("baz")) equals(t, value, []byte("baz"))
return nil return nil
}) })
} }
@ -94,7 +93,7 @@ func TestBucket_Put_Large(t *testing.T) {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
for i := 1; i < count; i++ { for i := 1; i < count; i++ {
assert.NoError(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor)))) ok(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))))
} }
return nil return nil
}) })
@ -102,7 +101,7 @@ func TestBucket_Put_Large(t *testing.T) {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
for i := 1; i < count; i++ { for i := 1; i < count; i++ {
value := b.Get([]byte(strings.Repeat("0", i*factor))) value := b.Get([]byte(strings.Repeat("0", i*factor)))
assert.Equal(t, []byte(strings.Repeat("X", (count-i)*factor)), value) equals(t, []byte(strings.Repeat("X", (count-i)*factor)), value)
} }
return nil return nil
}) })
@ -126,11 +125,11 @@ func TestDB_Put_VeryLarge(t *testing.T) {
for j := 0; j < batchN; j++ { for j := 0; j < batchN; j++ {
k, v := make([]byte, ksize), make([]byte, vsize) k, v := make([]byte, ksize), make([]byte, vsize)
binary.BigEndian.PutUint32(k, uint32(i+j)) binary.BigEndian.PutUint32(k, uint32(i+j))
assert.NoError(t, b.Put(k, v)) ok(t, b.Put(k, v))
} }
return nil return nil
}) })
assert.NoError(t, err) ok(t, err)
} }
} }
@ -141,8 +140,8 @@ func TestBucket_Put_IncompatibleValue(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
assert.Equal(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
return nil return nil
}) })
} }
@ -155,7 +154,7 @@ func TestBucket_Put_Closed(t *testing.T) {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
tx.Rollback() tx.Rollback()
assert.Equal(t, bolt.ErrTxClosed, b.Put([]byte("foo"), []byte("bar"))) equals(t, bolt.ErrTxClosed, b.Put([]byte("foo"), []byte("bar")))
} }
// Ensure that setting a value on a read-only bucket returns an error. // Ensure that setting a value on a read-only bucket returns an error.
@ -164,13 +163,13 @@ func TestBucket_Put_ReadOnly(t *testing.T) {
defer db.Close() defer db.Close()
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets")) _, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) ok(t, err)
return nil return nil
}) })
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
err := b.Put([]byte("foo"), []byte("bar")) err := b.Put([]byte("foo"), []byte("bar"))
assert.Equal(t, err, bolt.ErrTxNotWritable) equals(t, err, bolt.ErrTxNotWritable)
return nil return nil
}) })
} }
@ -183,9 +182,9 @@ func TestBucket_Delete(t *testing.T) {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")) err := tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
assert.NoError(t, err) ok(t, err)
value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
assert.Nil(t, value) assert(t, value == nil, "")
return nil return nil
}) })
} }
@ -197,21 +196,21 @@ func TestBucket_Delete_Large(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
var b, _ = tx.CreateBucket([]byte("widgets")) var b, _ = tx.CreateBucket([]byte("widgets"))
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
assert.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))))
} }
return nil return nil
}) })
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets")) var b = tx.Bucket([]byte("widgets"))
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
assert.NoError(t, b.Delete([]byte(strconv.Itoa(i)))) ok(t, b.Delete([]byte(strconv.Itoa(i))))
} }
return nil return nil
}) })
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets")) var b = tx.Bucket([]byte("widgets"))
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
assert.Nil(t, b.Get([]byte(strconv.Itoa(i)))) assert(t, b.Get([]byte(strconv.Itoa(i))) == nil, "")
} }
return nil return nil
}) })
@ -260,7 +259,7 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
}) })
// Check that a freelist overflow occurred. // Check that a freelist overflow occurred.
assert.NoError(t, err) ok(t, err)
} }
// Ensure that accessing and updating nested buckets is ok across transactions. // Ensure that accessing and updating nested buckets is ok across transactions.
@ -270,14 +269,14 @@ func TestBucket_Nested(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
// Create a widgets bucket. // Create a widgets bucket.
b, err := tx.CreateBucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) ok(t, err)
// Create a widgets/foo bucket. // Create a widgets/foo bucket.
_, err = b.CreateBucket([]byte("foo")) _, err = b.CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
// Create a widgets/bar key. // Create a widgets/bar key.
assert.NoError(t, b.Put([]byte("bar"), []byte("0000"))) ok(t, b.Put([]byte("bar"), []byte("0000")))
return nil return nil
}) })
@ -286,7 +285,7 @@ func TestBucket_Nested(t *testing.T) {
// Update widgets/bar. // Update widgets/bar.
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets")) var b = tx.Bucket([]byte("widgets"))
assert.NoError(t, b.Put([]byte("bar"), []byte("xxxx"))) ok(t, b.Put([]byte("bar"), []byte("xxxx")))
return nil return nil
}) })
db.MustCheck() db.MustCheck()
@ -295,7 +294,7 @@ func TestBucket_Nested(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets")) var b = tx.Bucket([]byte("widgets"))
for i := 0; i < 10000; i++ { for i := 0; i < 10000; i++ {
assert.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))))
} }
return nil return nil
}) })
@ -304,7 +303,7 @@ func TestBucket_Nested(t *testing.T) {
// Insert into widgets/foo/baz. // Insert into widgets/foo/baz.
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets")) var b = tx.Bucket([]byte("widgets"))
assert.NoError(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) ok(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")))
return nil return nil
}) })
db.MustCheck() db.MustCheck()
@ -312,10 +311,10 @@ func TestBucket_Nested(t *testing.T) {
// Verify. // Verify.
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets")) var b = tx.Bucket([]byte("widgets"))
assert.Equal(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz"))) equals(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz")))
assert.Equal(t, []byte("xxxx"), b.Get([]byte("bar"))) equals(t, []byte("xxxx"), b.Get([]byte("bar")))
for i := 0; i < 10000; i++ { for i := 0; i < 10000; i++ {
assert.Equal(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i)))) equals(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i))))
} }
return nil return nil
}) })
@ -329,8 +328,8 @@ func TestBucket_Delete_Bucket(t *testing.T) {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
_, err := b.CreateBucket([]byte("foo")) _, err := b.CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
assert.Equal(t, bolt.ErrIncompatibleValue, b.Delete([]byte("foo"))) equals(t, bolt.ErrIncompatibleValue, b.Delete([]byte("foo")))
return nil return nil
}) })
} }
@ -346,7 +345,7 @@ func TestBucket_Delete_ReadOnly(t *testing.T) {
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
err := b.Delete([]byte("foo")) err := b.Delete([]byte("foo"))
assert.Equal(t, err, bolt.ErrTxNotWritable) equals(t, err, bolt.ErrTxNotWritable)
return nil return nil
}) })
} }
@ -359,7 +358,7 @@ func TestBucket_Delete_Closed(t *testing.T) {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
tx.Rollback() tx.Rollback()
assert.Equal(t, bolt.ErrTxClosed, b.Delete([]byte("foo"))) equals(t, bolt.ErrTxClosed, b.Delete([]byte("foo")))
} }
// Ensure that deleting a bucket causes nested buckets to be deleted. // Ensure that deleting a bucket causes nested buckets to be deleted.
@ -369,11 +368,11 @@ func TestBucket_DeleteBucket_Nested(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
_, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar"))
assert.NoError(t, err) ok(t, err)
assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
assert.NoError(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) ok(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
return nil return nil
}) })
} }
@ -385,22 +384,22 @@ func TestBucket_DeleteBucket_Nested2(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
_, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar"))
assert.NoError(t, err) ok(t, err)
assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
return nil return nil
}) })
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket([]byte("widgets"))) assert(t, tx.Bucket([]byte("widgets")) != nil, "")
assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))) assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) != nil, "")
assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar"))) assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")) != nil, "")
assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz"))) equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz")))
assert.NoError(t, tx.DeleteBucket([]byte("widgets"))) ok(t, tx.DeleteBucket([]byte("widgets")))
return nil return nil
}) })
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
assert.Nil(t, tx.Bucket([]byte("widgets"))) assert(t, tx.Bucket([]byte("widgets")) == nil, "")
return nil return nil
}) })
} }
@ -411,17 +410,17 @@ func TestBucket_DeleteBucket_Large(t *testing.T) {
defer db.Close() defer db.Close()
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets")) _, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) ok(t, err)
_, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))
for i := 0; i < 1000; i++ { for i := 0; i < 1000; i++ {
assert.NoError(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)))) ok(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))))
} }
return nil return nil
}) })
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
assert.NoError(t, tx.DeleteBucket([]byte("widgets"))) ok(t, tx.DeleteBucket([]byte("widgets")))
return nil return nil
}) })
@ -434,8 +433,8 @@ func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
defer db.Close() defer db.Close()
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
assert.Nil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))) assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) == nil, "")
return nil return nil
}) })
} }
@ -446,10 +445,10 @@ func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
defer db.Close() defer db.Close()
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets")) _, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) ok(t, err)
assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
_, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
assert.Equal(t, bolt.ErrIncompatibleValue, err) equals(t, bolt.ErrIncompatibleValue, err)
return nil return nil
}) })
} }
@ -460,9 +459,9 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
defer db.Close() defer db.Close()
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets")) _, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) ok(t, err)
assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
assert.Equal(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
return nil return nil
}) })
} }
@ -477,16 +476,16 @@ func TestBucket_NextSequence(t *testing.T) {
// Make sure sequence increments. // Make sure sequence increments.
seq, err := tx.Bucket([]byte("widgets")).NextSequence() seq, err := tx.Bucket([]byte("widgets")).NextSequence()
assert.NoError(t, err) ok(t, err)
assert.Equal(t, seq, uint64(1)) equals(t, seq, uint64(1))
seq, err = tx.Bucket([]byte("widgets")).NextSequence() seq, err = tx.Bucket([]byte("widgets")).NextSequence()
assert.NoError(t, err) ok(t, err)
assert.Equal(t, seq, uint64(2)) equals(t, seq, uint64(2))
// Buckets should be separate. // Buckets should be separate.
seq, err = tx.Bucket([]byte("woojits")).NextSequence() seq, err = tx.Bucket([]byte("woojits")).NextSequence()
assert.NoError(t, err) ok(t, err)
assert.Equal(t, seq, uint64(1)) equals(t, seq, uint64(1))
return nil return nil
}) })
} }
@ -502,8 +501,8 @@ func TestBucket_NextSequence_ReadOnly(t *testing.T) {
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
i, err := b.NextSequence() i, err := b.NextSequence()
assert.Equal(t, i, uint64(0)) equals(t, i, uint64(0))
assert.Equal(t, err, bolt.ErrTxNotWritable) equals(t, err, bolt.ErrTxNotWritable)
return nil return nil
}) })
} }
@ -517,7 +516,7 @@ func TestBucket_NextSequence_Closed(t *testing.T) {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
tx.Rollback() tx.Rollback()
_, err := b.NextSequence() _, err := b.NextSequence()
assert.Equal(t, bolt.ErrTxClosed, err) equals(t, bolt.ErrTxClosed, err)
} }
// Ensure a user can loop over all key/value pairs in a bucket. // Ensure a user can loop over all key/value pairs in a bucket.
@ -534,20 +533,20 @@ func TestBucket_ForEach(t *testing.T) {
err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
switch index { switch index {
case 0: case 0:
assert.Equal(t, k, []byte("bar")) equals(t, k, []byte("bar"))
assert.Equal(t, v, []byte("0002")) equals(t, v, []byte("0002"))
case 1: case 1:
assert.Equal(t, k, []byte("baz")) equals(t, k, []byte("baz"))
assert.Equal(t, v, []byte("0001")) equals(t, v, []byte("0001"))
case 2: case 2:
assert.Equal(t, k, []byte("foo")) equals(t, k, []byte("foo"))
assert.Equal(t, v, []byte("0000")) equals(t, v, []byte("0000"))
} }
index++ index++
return nil return nil
}) })
assert.NoError(t, err) ok(t, err)
assert.Equal(t, index, 3) equals(t, index, 3)
return nil return nil
}) })
} }
@ -570,8 +569,8 @@ func TestBucket_ForEach_ShortCircuit(t *testing.T) {
} }
return nil return nil
}) })
assert.Equal(t, errors.New("marker"), err) equals(t, errors.New("marker"), err)
assert.Equal(t, 2, index) equals(t, 2, index)
return nil return nil
}) })
} }
@ -585,7 +584,7 @@ func TestBucket_ForEach_Closed(t *testing.T) {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
tx.Rollback() tx.Rollback()
err := b.ForEach(func(k, v []byte) error { return nil }) err := b.ForEach(func(k, v []byte) error { return nil })
assert.Equal(t, bolt.ErrTxClosed, err) equals(t, bolt.ErrTxClosed, err)
} }
// Ensure that an error is returned when inserting with an empty key. // Ensure that an error is returned when inserting with an empty key.
@ -595,9 +594,9 @@ func TestBucket_Put_EmptyKey(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar")) err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar"))
assert.Equal(t, err, bolt.ErrKeyRequired) equals(t, err, bolt.ErrKeyRequired)
err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar")) err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar"))
assert.Equal(t, err, bolt.ErrKeyRequired) equals(t, err, bolt.ErrKeyRequired)
return nil return nil
}) })
} }
@ -609,7 +608,7 @@ func TestBucket_Put_KeyTooLarge(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar")) err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar"))
assert.Equal(t, err, bolt.ErrKeyTooLarge) equals(t, err, bolt.ErrKeyTooLarge)
return nil return nil
}) })
} }
@ -636,33 +635,33 @@ func TestBucket_Stats(t *testing.T) {
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("woojits")) b := tx.Bucket([]byte("woojits"))
stats := b.Stats() stats := b.Stats()
assert.Equal(t, 1, stats.BranchPageN, "BranchPageN") equals(t, 1, stats.BranchPageN)
assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") equals(t, 0, stats.BranchOverflowN)
assert.Equal(t, 7, stats.LeafPageN, "LeafPageN") equals(t, 7, stats.LeafPageN)
assert.Equal(t, 2, stats.LeafOverflowN, "LeafOverflowN") equals(t, 2, stats.LeafOverflowN)
assert.Equal(t, 501, stats.KeyN, "KeyN") equals(t, 501, stats.KeyN)
assert.Equal(t, 2, stats.Depth, "Depth") equals(t, 2, stats.Depth)
branchInuse := 16 // branch page header branchInuse := 16 // branch page header
branchInuse += 7 * 16 // branch elements branchInuse += 7 * 16 // branch elements
branchInuse += 7 * 3 // branch keys (6 3-byte keys) branchInuse += 7 * 3 // branch keys (6 3-byte keys)
assert.Equal(t, branchInuse, stats.BranchInuse, "BranchInuse") equals(t, branchInuse, stats.BranchInuse)
leafInuse := 7 * 16 // leaf page header leafInuse := 7 * 16 // leaf page header
leafInuse += 501 * 16 // leaf elements leafInuse += 501 * 16 // leaf elements
leafInuse += 500*3 + len(big_key) // leaf keys leafInuse += 500*3 + len(big_key) // leaf keys
leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values
assert.Equal(t, leafInuse, stats.LeafInuse, "LeafInuse") equals(t, leafInuse, stats.LeafInuse)
if os.Getpagesize() == 4096 { if os.Getpagesize() == 4096 {
// Incompatible page size // Incompatible page size
assert.Equal(t, 4096, stats.BranchAlloc, "BranchAlloc") equals(t, 4096, stats.BranchAlloc)
assert.Equal(t, 36864, stats.LeafAlloc, "LeafAlloc") equals(t, 36864, stats.LeafAlloc)
} }
assert.Equal(t, 1, stats.BucketN, "BucketN") equals(t, 1, stats.BucketN)
assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN") equals(t, 0, stats.InlineBucketN)
assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse") equals(t, 0, stats.InlineBucketInuse)
return nil return nil
}) })
} }
@ -698,17 +697,17 @@ func TestBucket_Stats_RandomFill(t *testing.T) {
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
s := tx.Bucket([]byte("woojits")).Stats() s := tx.Bucket([]byte("woojits")).Stats()
assert.Equal(t, 100000, s.KeyN, "KeyN") equals(t, 100000, s.KeyN)
assert.Equal(t, 98, s.BranchPageN, "BranchPageN") equals(t, 98, s.BranchPageN)
assert.Equal(t, 0, s.BranchOverflowN, "BranchOverflowN") equals(t, 0, s.BranchOverflowN)
assert.Equal(t, 130984, s.BranchInuse, "BranchInuse") equals(t, 130984, s.BranchInuse)
assert.Equal(t, 401408, s.BranchAlloc, "BranchAlloc") equals(t, 401408, s.BranchAlloc)
assert.Equal(t, 3412, s.LeafPageN, "LeafPageN") equals(t, 3412, s.LeafPageN)
assert.Equal(t, 0, s.LeafOverflowN, "LeafOverflowN") equals(t, 0, s.LeafOverflowN)
assert.Equal(t, 4742482, s.LeafInuse, "LeafInuse") equals(t, 4742482, s.LeafInuse)
assert.Equal(t, 13975552, s.LeafAlloc, "LeafAlloc") equals(t, 13975552, s.LeafAlloc)
return nil return nil
}) })
} }
@ -720,7 +719,7 @@ func TestBucket_Stats_Small(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
// Add a bucket that fits on a single root leaf. // Add a bucket that fits on a single root leaf.
b, err := tx.CreateBucket([]byte("whozawhats")) b, err := tx.CreateBucket([]byte("whozawhats"))
assert.NoError(t, err) ok(t, err)
b.Put([]byte("foo"), []byte("bar")) b.Put([]byte("foo"), []byte("bar"))
return nil return nil
@ -729,22 +728,22 @@ func TestBucket_Stats_Small(t *testing.T) {
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("whozawhats")) b := tx.Bucket([]byte("whozawhats"))
stats := b.Stats() stats := b.Stats()
assert.Equal(t, 0, stats.BranchPageN, "BranchPageN") equals(t, 0, stats.BranchPageN)
assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") equals(t, 0, stats.BranchOverflowN)
assert.Equal(t, 0, stats.LeafPageN, "LeafPageN") equals(t, 0, stats.LeafPageN)
assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") equals(t, 0, stats.LeafOverflowN)
assert.Equal(t, 1, stats.KeyN, "KeyN") equals(t, 1, stats.KeyN)
assert.Equal(t, 1, stats.Depth, "Depth") equals(t, 1, stats.Depth)
assert.Equal(t, 0, stats.BranchInuse, "BranchInuse") equals(t, 0, stats.BranchInuse)
assert.Equal(t, 0, stats.LeafInuse, "LeafInuse") equals(t, 0, stats.LeafInuse)
if os.Getpagesize() == 4096 { if os.Getpagesize() == 4096 {
// Incompatible page size // Incompatible page size
assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc") equals(t, 0, stats.BranchAlloc)
assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc") equals(t, 0, stats.LeafAlloc)
} }
assert.Equal(t, 1, stats.BucketN, "BucketN") equals(t, 1, stats.BucketN)
assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN") equals(t, 1, stats.InlineBucketN)
assert.Equal(t, 16+16+6, stats.InlineBucketInuse, "InlineBucketInuse") equals(t, 16+16+6, stats.InlineBucketInuse)
return nil return nil
}) })
} }
@ -756,29 +755,29 @@ func TestBucket_Stats_EmptyBucket(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
// Add a bucket that fits on a single root leaf. // Add a bucket that fits on a single root leaf.
_, err := tx.CreateBucket([]byte("whozawhats")) _, err := tx.CreateBucket([]byte("whozawhats"))
assert.NoError(t, err) ok(t, err)
return nil return nil
}) })
db.MustCheck() db.MustCheck()
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("whozawhats")) b := tx.Bucket([]byte("whozawhats"))
stats := b.Stats() stats := b.Stats()
assert.Equal(t, 0, stats.BranchPageN, "BranchPageN") equals(t, 0, stats.BranchPageN)
assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") equals(t, 0, stats.BranchOverflowN)
assert.Equal(t, 0, stats.LeafPageN, "LeafPageN") equals(t, 0, stats.LeafPageN)
assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") equals(t, 0, stats.LeafOverflowN)
assert.Equal(t, 0, stats.KeyN, "KeyN") equals(t, 0, stats.KeyN)
assert.Equal(t, 1, stats.Depth, "Depth") equals(t, 1, stats.Depth)
assert.Equal(t, 0, stats.BranchInuse, "BranchInuse") equals(t, 0, stats.BranchInuse)
assert.Equal(t, 0, stats.LeafInuse, "LeafInuse") equals(t, 0, stats.LeafInuse)
if os.Getpagesize() == 4096 { if os.Getpagesize() == 4096 {
// Incompatible page size // Incompatible page size
assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc") equals(t, 0, stats.BranchAlloc)
assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc") equals(t, 0, stats.LeafAlloc)
} }
assert.Equal(t, 1, stats.BucketN, "BucketN") equals(t, 1, stats.BucketN)
assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN") equals(t, 1, stats.InlineBucketN)
assert.Equal(t, 16, stats.InlineBucketInuse, "InlineBucketInuse") equals(t, 16, stats.InlineBucketInuse)
return nil return nil
}) })
} }
@ -790,17 +789,17 @@ func TestBucket_Stats_Nested(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("foo")) b, err := tx.CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
for i := 0; i < 100; i++ { for i := 0; i < 100; i++ {
b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))) b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i)))
} }
bar, err := b.CreateBucket([]byte("bar")) bar, err := b.CreateBucket([]byte("bar"))
assert.NoError(t, err) ok(t, err)
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
} }
baz, err := bar.CreateBucket([]byte("baz")) baz, err := bar.CreateBucket([]byte("baz"))
assert.NoError(t, err) ok(t, err)
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
} }
@ -812,13 +811,13 @@ func TestBucket_Stats_Nested(t *testing.T) {
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("foo")) b := tx.Bucket([]byte("foo"))
stats := b.Stats() stats := b.Stats()
assert.Equal(t, 0, stats.BranchPageN, "BranchPageN") equals(t, 0, stats.BranchPageN)
assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") equals(t, 0, stats.BranchOverflowN)
assert.Equal(t, 2, stats.LeafPageN, "LeafPageN") equals(t, 2, stats.LeafPageN)
assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") equals(t, 0, stats.LeafOverflowN)
assert.Equal(t, 122, stats.KeyN, "KeyN") equals(t, 122, stats.KeyN)
assert.Equal(t, 3, stats.Depth, "Depth") equals(t, 3, stats.Depth)
assert.Equal(t, 0, stats.BranchInuse, "BranchInuse") equals(t, 0, stats.BranchInuse)
foo := 16 // foo (pghdr) foo := 16 // foo (pghdr)
foo += 101 * 16 // foo leaf elements foo += 101 * 16 // foo leaf elements
@ -834,15 +833,15 @@ func TestBucket_Stats_Nested(t *testing.T) {
baz += 10 * 16 // baz leaf elements baz += 10 * 16 // baz leaf elements
baz += 10 + 10 // baz leaf key/values baz += 10 + 10 // baz leaf key/values
assert.Equal(t, foo+bar+baz, stats.LeafInuse, "LeafInuse") equals(t, foo+bar+baz, stats.LeafInuse)
if os.Getpagesize() == 4096 { if os.Getpagesize() == 4096 {
// Incompatible page size // Incompatible page size
assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc") equals(t, 0, stats.BranchAlloc)
assert.Equal(t, 8192, stats.LeafAlloc, "LeafAlloc") equals(t, 8192, stats.LeafAlloc)
} }
assert.Equal(t, 3, stats.BucketN, "BucketN") equals(t, 3, stats.BucketN)
assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN") equals(t, 1, stats.InlineBucketN)
assert.Equal(t, baz, stats.InlineBucketInuse, "InlineBucketInuse") equals(t, baz, stats.InlineBucketInuse)
return nil return nil
}) })
} }
@ -873,22 +872,22 @@ func TestBucket_Stats_Large(t *testing.T) {
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
stats := b.Stats() stats := b.Stats()
assert.Equal(t, 13, stats.BranchPageN, "BranchPageN") equals(t, 13, stats.BranchPageN)
assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") equals(t, 0, stats.BranchOverflowN)
assert.Equal(t, 1196, stats.LeafPageN, "LeafPageN") equals(t, 1196, stats.LeafPageN)
assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") equals(t, 0, stats.LeafOverflowN)
assert.Equal(t, 100000, stats.KeyN, "KeyN") equals(t, 100000, stats.KeyN)
assert.Equal(t, 3, stats.Depth, "Depth") equals(t, 3, stats.Depth)
assert.Equal(t, 25257, stats.BranchInuse, "BranchInuse") equals(t, 25257, stats.BranchInuse)
assert.Equal(t, 2596916, stats.LeafInuse, "LeafInuse") equals(t, 2596916, stats.LeafInuse)
if os.Getpagesize() == 4096 { if os.Getpagesize() == 4096 {
// Incompatible page size // Incompatible page size
assert.Equal(t, 53248, stats.BranchAlloc, "BranchAlloc") equals(t, 53248, stats.BranchAlloc)
assert.Equal(t, 4898816, stats.LeafAlloc, "LeafAlloc") equals(t, 4898816, stats.LeafAlloc)
} }
assert.Equal(t, 1, stats.BucketN, "BucketN") equals(t, 1, stats.BucketN)
assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN") equals(t, 0, stats.InlineBucketN)
assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse") equals(t, 0, stats.InlineBucketInuse)
return nil return nil
}) })
} }
@ -960,20 +959,20 @@ func TestBucket_Put_Multiple(t *testing.T) {
err := db.Update(func(tx *bolt.Tx) error { err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
for _, item := range items { for _, item := range items {
assert.NoError(t, b.Put(item.Key, item.Value)) ok(t, b.Put(item.Key, item.Value))
} }
return nil return nil
}) })
assert.NoError(t, err) ok(t, err)
// Verify all items exist. // Verify all items exist.
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
for _, item := range items { for _, item := range items {
value := b.Get(item.Key) value := b.Get(item.Key)
if !assert.Equal(t, item.Value, value) { if !bytes.Equal(item.Value, value) {
db.CopyTempFile() db.CopyTempFile()
t.FailNow() t.Fatalf("exp=%x; got=%x", item.Value, value)
} }
} }
return nil return nil
@ -1002,18 +1001,18 @@ func TestBucket_Delete_Quick(t *testing.T) {
err := db.Update(func(tx *bolt.Tx) error { err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
for _, item := range items { for _, item := range items {
assert.NoError(t, b.Put(item.Key, item.Value)) ok(t, b.Put(item.Key, item.Value))
} }
return nil return nil
}) })
assert.NoError(t, err) ok(t, err)
// Remove items one at a time and check consistency. // Remove items one at a time and check consistency.
for _, item := range items { for _, item := range items {
err := db.Update(func(tx *bolt.Tx) error { err := db.Update(func(tx *bolt.Tx) error {
return tx.Bucket([]byte("widgets")).Delete(item.Key) return tx.Bucket([]byte("widgets")).Delete(item.Key)
}) })
assert.NoError(t, err) ok(t, err)
} }
// Anything before our deletion index should be nil. // Anything before our deletion index should be nil.

View File

@ -5,7 +5,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a list of buckets can be retrieved. // Ensure that a list of buckets can be retrieved.
@ -20,7 +19,7 @@ func TestBuckets(t *testing.T) {
}) })
db.Close() db.Close()
output := run("buckets", path) output := run("buckets", path)
assert.Equal(t, "whatchits\nwidgets\nwoojits", output) equals(t, "whatchits\nwidgets\nwoojits", output)
}) })
} }
@ -28,5 +27,5 @@ func TestBuckets(t *testing.T) {
func TestBucketsDBNotFound(t *testing.T) { func TestBucketsDBNotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("buckets", "no/such/db") output := run("buckets", "no/such/db")
assert.Equal(t, "stat no/such/db: no such file or directory", output) equals(t, "stat no/such/db: no such file or directory", output)
} }

View File

@ -5,7 +5,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a database can be exported. // Ensure that a database can be exported.
@ -32,7 +31,7 @@ func TestExport(t *testing.T) {
}) })
db.Close() db.Close()
output := run("export", path) output := run("export", path)
assert.Equal(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output) equals(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output)
}) })
} }
@ -40,5 +39,5 @@ func TestExport(t *testing.T) {
func TestExport_NotFound(t *testing.T) { func TestExport_NotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("export", "no/such/db") output := run("export", "no/such/db")
assert.Equal(t, "stat no/such/db: no such file or directory", output) equals(t, "stat no/such/db: no such file or directory", output)
} }

View File

@ -5,7 +5,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a value can be retrieved from the CLI. // Ensure that a value can be retrieved from the CLI.
@ -19,7 +18,7 @@ func TestGet(t *testing.T) {
}) })
db.Close() db.Close()
output := run("get", path, "widgets", "foo") output := run("get", path, "widgets", "foo")
assert.Equal(t, "bar", output) equals(t, "bar", output)
}) })
} }
@ -27,7 +26,7 @@ func TestGet(t *testing.T) {
func TestGetDBNotFound(t *testing.T) { func TestGetDBNotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("get", "no/such/db", "widgets", "foo") output := run("get", "no/such/db", "widgets", "foo")
assert.Equal(t, "stat no/such/db: no such file or directory", output) equals(t, "stat no/such/db: no such file or directory", output)
} }
// Ensure that an error is reported if the bucket is not found. // Ensure that an error is reported if the bucket is not found.
@ -36,7 +35,7 @@ func TestGetBucketNotFound(t *testing.T) {
open(func(db *bolt.DB, path string) { open(func(db *bolt.DB, path string) {
db.Close() db.Close()
output := run("get", path, "widgets", "foo") output := run("get", path, "widgets", "foo")
assert.Equal(t, "bucket not found: widgets", output) equals(t, "bucket not found: widgets", output)
}) })
} }
@ -50,6 +49,6 @@ func TestGetKeyNotFound(t *testing.T) {
}) })
db.Close() db.Close()
output := run("get", path, "widgets", "foo") output := run("get", path, "widgets", "foo")
assert.Equal(t, "key not found: foo", output) equals(t, "key not found: foo", output)
}) })
} }

View File

@ -6,7 +6,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a database can be imported. // Ensure that a database can be imported.
@ -15,32 +14,30 @@ func TestImport(t *testing.T) {
// Write input file. // Write input file.
input := tempfile() input := tempfile()
assert.NoError(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600)) ok(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600))
// Import database. // Import database.
path := tempfile() path := tempfile()
output := run("import", path, "--input", input) output := run("import", path, "--input", input)
assert.Equal(t, ``, output) equals(t, ``, output)
// Open database and verify contents. // Open database and verify contents.
db, err := bolt.Open(path, 0600, nil) db, err := bolt.Open(path, 0600, nil)
assert.NoError(t, err) ok(t, err)
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket([]byte("empty"))) assert(t, tx.Bucket([]byte("empty")) != nil, "")
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
if assert.NotNil(t, b) { assert(t, b != nil, "")
assert.Equal(t, []byte("0000"), b.Get([]byte("foo"))) equals(t, []byte("0000"), b.Get([]byte("foo")))
assert.Equal(t, []byte(""), b.Get([]byte("bar"))) equals(t, []byte(""), b.Get([]byte("bar")))
}
b = tx.Bucket([]byte("woojits")) b = tx.Bucket([]byte("woojits"))
if assert.NotNil(t, b) { assert(t, b != nil, "")
assert.Equal(t, []byte("XXXX"), b.Get([]byte("baz"))) equals(t, []byte("XXXX"), b.Get([]byte("baz")))
b = b.Bucket([]byte("woojits/subbucket")) b = b.Bucket([]byte("woojits/subbucket"))
assert.Equal(t, []byte("A"), b.Get([]byte("bat"))) equals(t, []byte("A"), b.Get([]byte("bat")))
}
return nil return nil
}) })
@ -51,5 +48,5 @@ func TestImport(t *testing.T) {
func TestImport_NotFound(t *testing.T) { func TestImport_NotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("import", "path/to/db", "--input", "no/such/file") output := run("import", "path/to/db", "--input", "no/such/file")
assert.Equal(t, "open no/such/file: no such file or directory", output) equals(t, "open no/such/file: no such file or directory", output)
} }

View File

@ -5,7 +5,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a database info can be printed. // Ensure that a database info can be printed.
@ -20,7 +19,7 @@ func TestInfo(t *testing.T) {
}) })
db.Close() db.Close()
output := run("info", path) output := run("info", path)
assert.Equal(t, `Page Size: 4096`, output) equals(t, `Page Size: 4096`, output)
}) })
} }
@ -28,5 +27,5 @@ func TestInfo(t *testing.T) {
func TestInfo_NotFound(t *testing.T) { func TestInfo_NotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("info", "no/such/db") output := run("info", "no/such/db")
assert.Equal(t, "stat no/such/db: no such file or directory", output) equals(t, "stat no/such/db: no such file or directory", output)
} }

View File

@ -5,7 +5,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a list of keys can be retrieved for a given bucket. // Ensure that a list of keys can be retrieved for a given bucket.
@ -21,7 +20,7 @@ func TestKeys(t *testing.T) {
}) })
db.Close() db.Close()
output := run("keys", path, "widgets") output := run("keys", path, "widgets")
assert.Equal(t, "0001\n0002\n0003", output) equals(t, "0001\n0002\n0003", output)
}) })
} }
@ -29,7 +28,7 @@ func TestKeys(t *testing.T) {
func TestKeysDBNotFound(t *testing.T) { func TestKeysDBNotFound(t *testing.T) {
SetTestMode(true) SetTestMode(true)
output := run("keys", "no/such/db", "widgets") output := run("keys", "no/such/db", "widgets")
assert.Equal(t, "stat no/such/db: no such file or directory", output) equals(t, "stat no/such/db: no such file or directory", output)
} }
// Ensure that an error is reported if the bucket is not found. // Ensure that an error is reported if the bucket is not found.
@ -38,6 +37,6 @@ func TestKeysBucketNotFound(t *testing.T) {
open(func(db *bolt.DB, path string) { open(func(db *bolt.DB, path string) {
db.Close() db.Close()
output := run("keys", path, "widgets") output := run("keys", path, "widgets")
assert.Equal(t, "bucket not found: widgets", output) equals(t, "bucket not found: widgets", output)
}) })
} }

View File

@ -1,9 +1,14 @@
package main_test package main_test
import ( import (
"fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath"
"reflect"
"runtime"
"strings" "strings"
"testing"
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
@ -35,3 +40,30 @@ func tempfile() string {
os.Remove(f.Name()) os.Remove(f.Name())
return f.Name() return f.Name()
} }
// assert fails the test if the condition is false.
func assert(tb testing.TB, condition bool, msg string, v ...interface{}) {
	if !condition {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...)
		tb.FailNow()
	}
}

// ok fails the test if an err is not nil.
func ok(tb testing.TB, err error) {
	if err != nil {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
		tb.FailNow()
	}
}

// equals fails the test if exp is not equal to act.
func equals(tb testing.TB, exp, act interface{}) {
	if !reflect.DeepEqual(exp, act) {
		_, file, line, _ := runtime.Caller(1)
		fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
		tb.FailNow()
	}
}
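
The same three helpers are copied into the cmd/bolt test package, presumably because helpers defined in one package's _test.go files cannot be imported by another package's tests. The \033[31m / \033[39m sequences they print are standard ANSI terminal color codes; a minimal, standalone illustration (not part of this commit):

	package main

	import "fmt"

	func main() {
		// Standard ANSI color codes, as used by the assert/ok/equals helpers:
		//   \033[31m  set the terminal foreground color to red
		//   \033[39m  reset the foreground color to the terminal default
		fmt.Println("\033[31mthis line prints in red on an ANSI-capable terminal\033[39m")
	}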

View File

@ -7,7 +7,6 @@ import (
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
. "github.com/boltdb/bolt/cmd/bolt" . "github.com/boltdb/bolt/cmd/bolt"
"github.com/stretchr/testify/assert"
) )
func TestStats(t *testing.T) { func TestStats(t *testing.T) {
@ -40,7 +39,7 @@ func TestStats(t *testing.T) {
}) })
db.Close() db.Close()
output := run("stats", path, "b") output := run("stats", path, "b")
assert.Equal(t, "Aggregate statistics for 2 buckets\n\n"+ equals(t, "Aggregate statistics for 2 buckets\n\n"+
"Page count statistics\n"+ "Page count statistics\n"+
"\tNumber of logical branch pages: 0\n"+ "\tNumber of logical branch pages: 0\n"+
"\tNumber of physical branch overflow pages: 0\n"+ "\tNumber of physical branch overflow pages: 0\n"+

View File

@ -8,7 +8,6 @@ import (
"testing/quick" "testing/quick"
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that a cursor can return a reference to the bucket that created it. // Ensure that a cursor can return a reference to the bucket that created it.
@ -18,7 +17,7 @@ func TestCursor_Bucket(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucket([]byte("widgets")) b, _ := tx.CreateBucket([]byte("widgets"))
c := b.Cursor() c := b.Cursor()
assert.Equal(t, b, c.Bucket()) equals(t, b, c.Bucket())
return nil return nil
}) })
} }
@ -29,12 +28,12 @@ func TestCursor_Seek(t *testing.T) {
defer db.Close() defer db.Close()
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) ok(t, err)
assert.NoError(t, b.Put([]byte("foo"), []byte("0001"))) ok(t, b.Put([]byte("foo"), []byte("0001")))
assert.NoError(t, b.Put([]byte("bar"), []byte("0002"))) ok(t, b.Put([]byte("bar"), []byte("0002")))
assert.NoError(t, b.Put([]byte("baz"), []byte("0003"))) ok(t, b.Put([]byte("baz"), []byte("0003")))
_, err = b.CreateBucket([]byte("bkt")) _, err = b.CreateBucket([]byte("bkt"))
assert.NoError(t, err) ok(t, err)
return nil return nil
}) })
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
@ -42,28 +41,28 @@ func TestCursor_Seek(t *testing.T) {
// Exact match should go to the key. // Exact match should go to the key.
k, v := c.Seek([]byte("bar")) k, v := c.Seek([]byte("bar"))
assert.Equal(t, []byte("bar"), k) equals(t, []byte("bar"), k)
assert.Equal(t, []byte("0002"), v) equals(t, []byte("0002"), v)
// Inexact match should go to the next key. // Inexact match should go to the next key.
k, v = c.Seek([]byte("bas")) k, v = c.Seek([]byte("bas"))
assert.Equal(t, []byte("baz"), k) equals(t, []byte("baz"), k)
assert.Equal(t, []byte("0003"), v) equals(t, []byte("0003"), v)
// Low key should go to the first key. // Low key should go to the first key.
k, v = c.Seek([]byte("")) k, v = c.Seek([]byte(""))
assert.Equal(t, []byte("bar"), k) equals(t, []byte("bar"), k)
assert.Equal(t, []byte("0002"), v) equals(t, []byte("0002"), v)
// High key should return no key. // High key should return no key.
k, v = c.Seek([]byte("zzz")) k, v = c.Seek([]byte("zzz"))
assert.Nil(t, k) assert(t, k == nil, "")
assert.Nil(t, v) assert(t, v == nil, "")
// Buckets should return their key but no value. // Buckets should return their key but no value.
k, v = c.Seek([]byte("bkt")) k, v = c.Seek([]byte("bkt"))
assert.Equal(t, []byte("bkt"), k) equals(t, []byte("bkt"), k)
assert.Nil(t, v) assert(t, v == nil, "")
return nil return nil
}) })
@ -98,13 +97,13 @@ func TestCursor_Delete(t *testing.T) {
} }
c.Seek([]byte("sub")) c.Seek([]byte("sub"))
err := c.Delete() err := c.Delete()
assert.Equal(t, err, bolt.ErrIncompatibleValue) equals(t, err, bolt.ErrIncompatibleValue)
return nil return nil
}) })
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
assert.Equal(t, b.Stats().KeyN, count/2+1) equals(t, b.Stats().KeyN, count/2+1)
return nil return nil
}) })
} }
@ -144,16 +143,16 @@ func TestCursor_Seek_Large(t *testing.T) {
// The last seek is beyond the end of the range so // it should return nil.
// it should return nil. // it should return nil.
if i == count-1 { if i == count-1 {
assert.Nil(t, k) assert(t, k == nil, "")
continue continue
} }
// Otherwise we should seek to the exact key or the next key. // Otherwise we should seek to the exact key or the next key.
num := binary.BigEndian.Uint64(k) num := binary.BigEndian.Uint64(k)
if i%2 == 0 { if i%2 == 0 {
assert.Equal(t, uint64(i), num) equals(t, uint64(i), num)
} else { } else {
assert.Equal(t, uint64(i+1), num) equals(t, uint64(i+1), num)
} }
} }
@ -172,8 +171,8 @@ func TestCursor_EmptyBucket(t *testing.T) {
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor() c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.First() k, v := c.First()
assert.Nil(t, k) assert(t, k == nil, "")
assert.Nil(t, v) assert(t, v == nil, "")
return nil return nil
}) })
} }
@ -190,8 +189,8 @@ func TestCursor_EmptyBucketReverse(t *testing.T) {
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor() c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.Last() k, v := c.Last()
assert.Nil(t, k) assert(t, k == nil, "")
assert.Nil(t, v) assert(t, v == nil, "")
return nil return nil
}) })
} }
@ -212,24 +211,24 @@ func TestCursor_Iterate_Leaf(t *testing.T) {
c := tx.Bucket([]byte("widgets")).Cursor() c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.First() k, v := c.First()
assert.Equal(t, string(k), "bar") equals(t, string(k), "bar")
assert.Equal(t, v, []byte{1}) equals(t, v, []byte{1})
k, v = c.Next() k, v = c.Next()
assert.Equal(t, string(k), "baz") equals(t, string(k), "baz")
assert.Equal(t, v, []byte{}) equals(t, v, []byte{})
k, v = c.Next() k, v = c.Next()
assert.Equal(t, string(k), "foo") equals(t, string(k), "foo")
assert.Equal(t, v, []byte{0}) equals(t, v, []byte{0})
k, v = c.Next() k, v = c.Next()
assert.Nil(t, k) assert(t, k == nil, "")
assert.Nil(t, v) assert(t, v == nil, "")
k, v = c.Next() k, v = c.Next()
assert.Nil(t, k) assert(t, k == nil, "")
assert.Nil(t, v) assert(t, v == nil, "")
tx.Rollback() tx.Rollback()
} }
@ -250,24 +249,24 @@ func TestCursor_LeafRootReverse(t *testing.T) {
c := tx.Bucket([]byte("widgets")).Cursor() c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.Last() k, v := c.Last()
assert.Equal(t, string(k), "foo") equals(t, string(k), "foo")
assert.Equal(t, v, []byte{0}) equals(t, v, []byte{0})
k, v = c.Prev() k, v = c.Prev()
assert.Equal(t, string(k), "baz") equals(t, string(k), "baz")
assert.Equal(t, v, []byte{}) equals(t, v, []byte{})
k, v = c.Prev() k, v = c.Prev()
assert.Equal(t, string(k), "bar") equals(t, string(k), "bar")
assert.Equal(t, v, []byte{1}) equals(t, v, []byte{1})
k, v = c.Prev() k, v = c.Prev()
assert.Nil(t, k) assert(t, k == nil, "")
assert.Nil(t, v) assert(t, v == nil, "")
k, v = c.Prev() k, v = c.Prev()
assert.Nil(t, k) assert(t, k == nil, "")
assert.Nil(t, v) assert(t, v == nil, "")
tx.Rollback() tx.Rollback()
} }
@ -288,16 +287,16 @@ func TestCursor_Restart(t *testing.T) {
c := tx.Bucket([]byte("widgets")).Cursor() c := tx.Bucket([]byte("widgets")).Cursor()
k, _ := c.First() k, _ := c.First()
assert.Equal(t, string(k), "bar") equals(t, string(k), "bar")
k, _ = c.Next() k, _ = c.Next()
assert.Equal(t, string(k), "foo") equals(t, string(k), "foo")
k, _ = c.First() k, _ = c.First()
assert.Equal(t, string(k), "bar") equals(t, string(k), "bar")
k, _ = c.Next() k, _ = c.Next()
assert.Equal(t, string(k), "foo") equals(t, string(k), "foo")
tx.Rollback() tx.Rollback()
} }
@ -313,9 +312,9 @@ func TestCursor_QuickCheck(t *testing.T) {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
for _, item := range items { for _, item := range items {
assert.NoError(t, b.Put(item.Key, item.Value)) ok(t, b.Put(item.Key, item.Value))
} }
assert.NoError(t, tx.Commit()) ok(t, tx.Commit())
// Sort test data. // Sort test data.
sort.Sort(items) sort.Sort(items)
@ -325,11 +324,11 @@ func TestCursor_QuickCheck(t *testing.T) {
tx, _ = db.Begin(false) tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor() c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
assert.Equal(t, k, items[index].Key) equals(t, k, items[index].Key)
assert.Equal(t, v, items[index].Value) equals(t, v, items[index].Value)
index++ index++
} }
assert.Equal(t, len(items), index) equals(t, len(items), index)
tx.Rollback() tx.Rollback()
return true return true
@ -350,9 +349,9 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
for _, item := range items { for _, item := range items {
assert.NoError(t, b.Put(item.Key, item.Value)) ok(t, b.Put(item.Key, item.Value))
} }
assert.NoError(t, tx.Commit()) ok(t, tx.Commit())
// Sort test data. // Sort test data.
sort.Sort(revtestdata(items)) sort.Sort(revtestdata(items))
@ -362,11 +361,11 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
tx, _ = db.Begin(false) tx, _ = db.Begin(false)
c := tx.Bucket([]byte("widgets")).Cursor() c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
assert.Equal(t, k, items[index].Key) equals(t, k, items[index].Key)
assert.Equal(t, v, items[index].Value) equals(t, v, items[index].Value)
index++ index++
} }
assert.Equal(t, len(items), index) equals(t, len(items), index)
tx.Rollback() tx.Rollback()
return true return true
@ -383,13 +382,13 @@ func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) ok(t, err)
_, err = b.CreateBucket([]byte("foo")) _, err = b.CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
_, err = b.CreateBucket([]byte("bar")) _, err = b.CreateBucket([]byte("bar"))
assert.NoError(t, err) ok(t, err)
_, err = b.CreateBucket([]byte("baz")) _, err = b.CreateBucket([]byte("baz"))
assert.NoError(t, err) ok(t, err)
return nil return nil
}) })
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
@ -397,9 +396,9 @@ func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
c := tx.Bucket([]byte("widgets")).Cursor() c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() { for k, v := c.First(); k != nil; k, v = c.Next() {
names = append(names, string(k)) names = append(names, string(k))
assert.Nil(t, v) assert(t, v == nil, "")
} }
assert.Equal(t, names, []string{"bar", "baz", "foo"}) equals(t, names, []string{"bar", "baz", "foo"})
return nil return nil
}) })
} }
@ -411,13 +410,13 @@ func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err) ok(t, err)
_, err = b.CreateBucket([]byte("foo")) _, err = b.CreateBucket([]byte("foo"))
assert.NoError(t, err) ok(t, err)
_, err = b.CreateBucket([]byte("bar")) _, err = b.CreateBucket([]byte("bar"))
assert.NoError(t, err) ok(t, err)
_, err = b.CreateBucket([]byte("baz")) _, err = b.CreateBucket([]byte("baz"))
assert.NoError(t, err) ok(t, err)
return nil return nil
}) })
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
@ -425,9 +424,9 @@ func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
c := tx.Bucket([]byte("widgets")).Cursor() c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil; k, v = c.Prev() { for k, v := c.Last(); k != nil; k, v = c.Prev() {
names = append(names, string(k)) names = append(names, string(k))
assert.Nil(t, v) assert(t, v == nil, "")
} }
assert.Equal(t, names, []string{"foo", "baz", "bar"}) equals(t, names, []string{"foo", "baz", "bar"})
return nil return nil
}) })
} }
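
Note: the forward and reverse bucket-only tests above walk the cursor with the same loop; a tiny helper could gather the keys in one place. This is a sketch only, the name collectKeys is assumed and it is not part of this commit:

// collectKeys returns every key in b in cursor order
// (hypothetical helper, not part of this change).
func collectKeys(b *bolt.Bucket) []string {
	var names []string
	c := b.Cursor()
	for k, _ := c.First(); k != nil; k, _ = c.Next() {
		names = append(names, string(k))
	}
	return names
}

With it, the forward check above reduces to equals(t, []string{"bar", "baz", "foo"}, collectKeys(tx.Bucket([]byte("widgets")))).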

db_test.go

@ -14,7 +14,6 @@ import (
"time" "time"
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
"github.com/stretchr/testify/assert"
) )
var statsFlag = flag.Bool("stats", false, "show performance stats") var statsFlag = flag.Bool("stats", false, "show performance stats")
@ -22,8 +21,8 @@ var statsFlag = flag.Bool("stats", false, "show performance stats")
// Ensure that opening a database with a bad path returns an error. // Ensure that opening a database with a bad path returns an error.
func TestOpen_BadPath(t *testing.T) { func TestOpen_BadPath(t *testing.T) {
db, err := bolt.Open("", 0666, nil) db, err := bolt.Open("", 0666, nil)
assert.Error(t, err) assert(t, err != nil, "err: %s", err)
assert.Nil(t, db) assert(t, db == nil, "")
} }
// Ensure that a database can be opened without error. // Ensure that a database can be opened without error.
@ -31,10 +30,10 @@ func TestOpen(t *testing.T) {
path := tempfile() path := tempfile()
defer os.Remove(path) defer os.Remove(path)
db, err := bolt.Open(path, 0666, nil) db, err := bolt.Open(path, 0666, nil)
assert.NotNil(t, db) assert(t, db != nil, "")
assert.NoError(t, err) ok(t, err)
assert.Equal(t, db.Path(), path) equals(t, db.Path(), path)
assert.NoError(t, db.Close()) ok(t, db.Close())
} }
// Ensure that opening an already open database file will timeout. // Ensure that opening an already open database file will timeout.
@ -48,15 +47,15 @@ func TestOpen_Timeout(t *testing.T) {
// Open a data file. // Open a data file.
db0, err := bolt.Open(path, 0666, nil) db0, err := bolt.Open(path, 0666, nil)
assert.NotNil(t, db0) assert(t, db0 != nil, "")
assert.NoError(t, err) ok(t, err)
// Attempt to open the database again. // Attempt to open the database again.
start := time.Now() start := time.Now()
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond}) db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond})
assert.Nil(t, db1) assert(t, db1 == nil, "")
assert.Equal(t, bolt.ErrTimeout, err) equals(t, bolt.ErrTimeout, err)
assert.True(t, time.Since(start) > 100*time.Millisecond) assert(t, time.Since(start) > 100*time.Millisecond, "")
db0.Close() db0.Close()
} }
@ -72,8 +71,8 @@ func TestOpen_Wait(t *testing.T) {
// Open a data file. // Open a data file.
db0, err := bolt.Open(path, 0666, nil) db0, err := bolt.Open(path, 0666, nil)
assert.NotNil(t, db0) assert(t, db0 != nil, "")
assert.NoError(t, err) ok(t, err)
// Close it in just a bit. // Close it in just a bit.
time.AfterFunc(100*time.Millisecond, func() { db0.Close() }) time.AfterFunc(100*time.Millisecond, func() { db0.Close() })
@ -81,9 +80,9 @@ func TestOpen_Wait(t *testing.T) {
// Attempt to open the database again. // Attempt to open the database again.
start := time.Now() start := time.Now()
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond}) db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond})
assert.NotNil(t, db1) assert(t, db1 != nil, "")
assert.NoError(t, err) ok(t, err)
assert.True(t, time.Since(start) > 100*time.Millisecond) assert(t, time.Since(start) > 100*time.Millisecond, "")
} }
// Ensure that a re-opened database is consistent. // Ensure that a re-opened database is consistent.
@ -92,13 +91,13 @@ func TestOpen_Check(t *testing.T) {
defer os.Remove(path) defer os.Remove(path)
db, err := bolt.Open(path, 0666, nil) db, err := bolt.Open(path, 0666, nil)
assert.NoError(t, err) ok(t, err)
assert.NoError(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() }))
db.Close() db.Close()
db, err = bolt.Open(path, 0666, nil) db, err = bolt.Open(path, 0666, nil)
assert.NoError(t, err) ok(t, err)
assert.NoError(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() }))
db.Close() db.Close()
} }
@ -108,10 +107,9 @@ func TestDB_Open_FileError(t *testing.T) {
defer os.Remove(path) defer os.Remove(path)
_, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil) _, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil)
if err, _ := err.(*os.PathError); assert.Error(t, err) { assert(t, err.(*os.PathError) != nil, "")
assert.Equal(t, path+"/youre-not-my-real-parent", err.Path) equals(t, path+"/youre-not-my-real-parent", err.(*os.PathError).Path)
assert.Equal(t, "open", err.Op) equals(t, "open", err.(*os.PathError).Op)
}
} }
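
Note: the rewritten check above type-asserts err without the comma-ok form, so it would panic rather than fail the test if Open ever returned something other than an *os.PathError. A slightly safer variant using the existing test helpers might look like this (a sketch, not what the commit does):

pe, isPathErr := err.(*os.PathError)
assert(t, isPathErr, "expected *os.PathError, got %T", err)
equals(t, path+"/youre-not-my-real-parent", pe.Path)
equals(t, "open", pe.Op)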
// Ensure that write errors to the meta file handler during initialization are returned. // Ensure that write errors to the meta file handler during initialization are returned.
@ -125,87 +123,24 @@ func TestDB_Open_FileTooSmall(t *testing.T) {
defer os.Remove(path) defer os.Remove(path)
db, err := bolt.Open(path, 0666, nil) db, err := bolt.Open(path, 0666, nil)
assert.NoError(t, err) ok(t, err)
db.Close() db.Close()
// corrupt the database // corrupt the database
assert.NoError(t, os.Truncate(path, int64(os.Getpagesize()))) ok(t, os.Truncate(path, int64(os.Getpagesize())))
db, err = bolt.Open(path, 0666, nil) db, err = bolt.Open(path, 0666, nil)
assert.Equal(t, errors.New("file size too small"), err) equals(t, errors.New("file size too small"), err)
} }
// TODO(benbjohnson): Test corruption at every byte of the first two pages. // TODO(benbjohnson): Test corruption at every byte of the first two pages.
/*
// Ensure that corrupt meta0 page errors get returned.
func TestDB_Open_CorruptMeta0(t *testing.T) {
var m meta
m.magic = magic
m.version = version
m.pageSize = 0x8000
path := tempfile()
defer os.Remove(path)
// Create a file with bad magic.
b := make([]byte, 0x10000)
p0, p1 := (*page)(unsafe.Pointer(&b[0x0000])), (*page)(unsafe.Pointer(&b[0x8000]))
p0.meta().magic = 0
p0.meta().version = version
p1.meta().magic = magic
p1.meta().version = version
err := ioutil.WriteFile(path, b, 0666)
assert.NoError(t, err)
// Open the database.
_, err = bolt.Open(path, 0666, nil)
assert.Equal(t, err, errors.New("meta0 error: invalid database"))
}
// Ensure that a corrupt meta page checksum causes the open to fail.
func TestDB_Open_MetaChecksumError(t *testing.T) {
for i := 0; i < 2; i++ {
path := tempfile()
defer os.Remove(path)
db, err := bolt.Open(path, 0600, nil)
pageSize := db.pageSize
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucket([]byte("woojits"))
return err
})
db.Close()
// Change a single byte in the meta page.
f, _ := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
f.WriteAt([]byte{1}, int64((i*pageSize)+(pageHeaderSize+12)))
f.Sync()
f.Close()
// Reopen the database.
_, err = bolt.Open(path, 0600, nil)
if assert.Error(t, err) {
if i == 0 {
assert.Equal(t, "meta0 error: checksum error", err.Error())
} else {
assert.Equal(t, "meta1 error: checksum error", err.Error())
}
}
}
}
*/
// Ensure that a database cannot open a transaction when it's not open. // Ensure that a database cannot open a transaction when it's not open.
func TestDB_Begin_DatabaseNotOpen(t *testing.T) { func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
var db bolt.DB var db bolt.DB
tx, err := db.Begin(false) tx, err := db.Begin(false)
assert.Nil(t, tx) assert(t, tx == nil, "")
assert.Equal(t, err, bolt.ErrDatabaseNotOpen) equals(t, err, bolt.ErrDatabaseNotOpen)
} }
// Ensure that a read-write transaction can be retrieved. // Ensure that a read-write transaction can be retrieved.
@ -213,19 +148,19 @@ func TestDB_BeginRW(t *testing.T) {
db := NewTestDB() db := NewTestDB()
defer db.Close() defer db.Close()
tx, err := db.Begin(true) tx, err := db.Begin(true)
assert.NotNil(t, tx) assert(t, tx != nil, "")
assert.NoError(t, err) ok(t, err)
assert.Equal(t, tx.DB(), db) assert(t, tx.DB() == db.DB, "")
assert.Equal(t, tx.Writable(), true) equals(t, tx.Writable(), true)
assert.NoError(t, tx.Commit()) ok(t, tx.Commit())
} }
// Ensure that opening a transaction while the DB is closed returns an error. // Ensure that opening a transaction while the DB is closed returns an error.
func TestDB_BeginRW_Closed(t *testing.T) { func TestDB_BeginRW_Closed(t *testing.T) {
var db bolt.DB var db bolt.DB
tx, err := db.Begin(true) tx, err := db.Begin(true)
assert.Equal(t, err, bolt.ErrDatabaseNotOpen) equals(t, err, bolt.ErrDatabaseNotOpen)
assert.Nil(t, tx) assert(t, tx == nil, "")
} }
// Ensure a database can provide a transactional block. // Ensure a database can provide a transactional block.
@ -240,13 +175,13 @@ func TestDB_Update(t *testing.T) {
b.Delete([]byte("foo")) b.Delete([]byte("foo"))
return nil return nil
}) })
assert.NoError(t, err) ok(t, err)
err = db.View(func(tx *bolt.Tx) error { err = db.View(func(tx *bolt.Tx) error {
assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo"))) assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
return nil return nil
}) })
assert.NoError(t, err) ok(t, err)
} }
// Ensure a closed database returns an error while running a transaction block // Ensure a closed database returns an error while running a transaction block
@ -256,23 +191,87 @@ func TestDB_Update_Closed(t *testing.T) {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
return nil return nil
}) })
assert.Equal(t, err, bolt.ErrDatabaseNotOpen) equals(t, err, bolt.ErrDatabaseNotOpen)
} }
// Ensure a panic occurs while trying to commit a managed transaction. // Ensure a panic occurs while trying to commit a managed transaction.
func TestDB_Update_ManualCommitAndRollback(t *testing.T) { func TestDB_Update_ManualCommit(t *testing.T) {
var db bolt.DB db := NewTestDB()
defer db.Close()
var ok bool
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) func() {
assert.Panics(t, func() { tx.Commit() }) defer func() {
assert.Panics(t, func() { tx.Rollback() }) if r := recover(); r != nil {
ok = true
}
}()
tx.Commit()
}()
return nil return nil
}) })
db.View(func(tx *bolt.Tx) error { assert(t, ok, "expected panic")
assert.Panics(t, func() { tx.Commit() }) }
assert.Panics(t, func() { tx.Rollback() })
// Ensure a panic occurs while trying to rollback a managed transaction.
func TestDB_Update_ManualRollback(t *testing.T) {
db := NewTestDB()
defer db.Close()
var ok bool
db.Update(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Rollback()
}()
return nil return nil
}) })
assert(t, ok, "expected panic")
}
// Ensure a panic occurs while trying to commit a managed transaction.
func TestDB_View_ManualCommit(t *testing.T) {
db := NewTestDB()
defer db.Close()
var ok bool
db.Update(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Commit()
}()
return nil
})
assert(t, ok, "expected panic")
}
// Ensure a panic occurs while trying to rollback a managed transaction.
func TestDB_View_ManualRollback(t *testing.T) {
db := NewTestDB()
defer db.Close()
var ok bool
db.Update(func(tx *bolt.Tx) error {
func() {
defer func() {
if r := recover(); r != nil {
ok = true
}
}()
tx.Rollback()
}()
return nil
})
assert(t, ok, "expected panic")
} }
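
Note: the four panic tests above repeat the same defer/recover boilerplate. A small helper could express the intent once; this is a sketch only, the name mustPanic is assumed and it is not part of this commit:

// mustPanic fails the test if fn returns without panicking
// (hypothetical helper, not part of this change).
func mustPanic(tb testing.TB, fn func()) {
	defer func() {
		if recover() == nil {
			tb.Fatal("expected panic")
		}
	}()
	fn()
}

Usage would then be, for example: db.Update(func(tx *bolt.Tx) error { mustPanic(t, func() { tx.Commit() }); return nil }).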
// Ensure a write transaction that panics does not hold open locks. // Ensure a write transaction that panics does not hold open locks.
@ -297,11 +296,11 @@ func TestDB_Update_Panic(t *testing.T) {
_, err := tx.CreateBucket([]byte("widgets")) _, err := tx.CreateBucket([]byte("widgets"))
return err return err
}) })
assert.NoError(t, err) ok(t, err)
// Verify that our change persisted. // Verify that our change persisted.
err = db.Update(func(tx *bolt.Tx) error { err = db.Update(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket([]byte("widgets"))) assert(t, tx.Bucket([]byte("widgets")) != nil, "")
return nil return nil
}) })
} }
@ -313,7 +312,7 @@ func TestDB_View_Error(t *testing.T) {
err := db.View(func(tx *bolt.Tx) error { err := db.View(func(tx *bolt.Tx) error {
return errors.New("xxx") return errors.New("xxx")
}) })
assert.Equal(t, errors.New("xxx"), err) equals(t, errors.New("xxx"), err)
} }
// Ensure a read transaction that panics does not hold open locks. // Ensure a read transaction that panics does not hold open locks.
@ -332,14 +331,14 @@ func TestDB_View_Panic(t *testing.T) {
} }
}() }()
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket([]byte("widgets"))) assert(t, tx.Bucket([]byte("widgets")) != nil, "")
panic("omg") panic("omg")
}) })
}() }()
// Verify that we can still use read transactions. // Verify that we can still use read transactions.
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket([]byte("widgets"))) assert(t, tx.Bucket([]byte("widgets")) != nil, "")
return nil return nil
}) })
} }
@ -358,9 +357,9 @@ func TestDB_Stats(t *testing.T) {
return err return err
}) })
stats := db.Stats() stats := db.Stats()
assert.Equal(t, 2, stats.TxStats.PageCount, "PageCount") equals(t, 2, stats.TxStats.PageCount)
assert.Equal(t, 0, stats.FreePageN, "FreePageN") equals(t, 0, stats.FreePageN)
assert.Equal(t, 2, stats.PendingPageN, "PendingPageN") equals(t, 2, stats.PendingPageN)
} }
// Ensure that database pages are in expected order and type. // Ensure that database pages are in expected order and type.
@ -374,31 +373,37 @@ func TestDB_Consistency(t *testing.T) {
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
return nil return nil
}) })
} }
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
if p, _ := tx.Page(0); assert.NotNil(t, p) { p, _ := tx.Page(0)
assert.Equal(t, "meta", p.Type) assert(t, p != nil, "")
} equals(t, "meta", p.Type)
if p, _ := tx.Page(1); assert.NotNil(t, p) {
assert.Equal(t, "meta", p.Type) p, _ = tx.Page(1)
} assert(t, p != nil, "")
if p, _ := tx.Page(2); assert.NotNil(t, p) { equals(t, "meta", p.Type)
assert.Equal(t, "free", p.Type)
} p, _ = tx.Page(2)
if p, _ := tx.Page(3); assert.NotNil(t, p) { assert(t, p != nil, "")
assert.Equal(t, "free", p.Type) equals(t, "free", p.Type)
}
if p, _ := tx.Page(4); assert.NotNil(t, p) { p, _ = tx.Page(3)
assert.Equal(t, "leaf", p.Type) // root leaf assert(t, p != nil, "")
} equals(t, "free", p.Type)
if p, _ := tx.Page(5); assert.NotNil(t, p) {
assert.Equal(t, "freelist", p.Type) p, _ = tx.Page(4)
} assert(t, p != nil, "")
p, _ := tx.Page(6) equals(t, "leaf", p.Type)
assert.Nil(t, p)
p, _ = tx.Page(5)
assert(t, p != nil, "")
equals(t, "freelist", p.Type)
p, _ = tx.Page(6)
assert(t, p == nil, "")
return nil return nil
}) })
} }
@ -411,9 +416,9 @@ func TestDBStats_Sub(t *testing.T) {
b.TxStats.PageCount = 10 b.TxStats.PageCount = 10
b.FreePageN = 14 b.FreePageN = 14
diff := b.Sub(&a) diff := b.Sub(&a)
assert.Equal(t, 7, diff.TxStats.PageCount) equals(t, 7, diff.TxStats.PageCount)
// free page stats are copied from the receiver and not subtracted // free page stats are copied from the receiver and not subtracted
assert.Equal(t, 14, diff.FreePageN) equals(t, 14, diff.FreePageN)
} }
func ExampleDB_Update() { func ExampleDB_Update() {

freelist_test.go

@ -1,24 +1,27 @@
package bolt package bolt
import ( import (
"reflect"
"testing" "testing"
"unsafe" "unsafe"
"github.com/stretchr/testify/assert"
) )
// Ensure that a page is added to a transaction's freelist. // Ensure that a page is added to a transaction's freelist.
func TestFreelist_free(t *testing.T) { func TestFreelist_free(t *testing.T) {
f := newFreelist() f := newFreelist()
f.free(100, &page{id: 12}) f.free(100, &page{id: 12})
assert.Equal(t, f.pending[100], []pgid{12}) if !reflect.DeepEqual([]pgid{12}, f.pending[100]) {
t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100])
}
} }
// Ensure that a page and its overflow is added to a transaction's freelist. // Ensure that a page and its overflow is added to a transaction's freelist.
func TestFreelist_free_overflow(t *testing.T) { func TestFreelist_free_overflow(t *testing.T) {
f := newFreelist() f := newFreelist()
f.free(100, &page{id: 12, overflow: 3}) f.free(100, &page{id: 12, overflow: 3})
assert.Equal(t, f.pending[100], []pgid{12, 13, 14, 15}) if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) {
t.Fatalf("exp=%v; got=%v", exp, f.pending[100])
}
} }
// Ensure that a transaction's free pages can be released. // Ensure that a transaction's free pages can be released.
@ -29,25 +32,56 @@ func TestFreelist_release(t *testing.T) {
f.free(102, &page{id: 39}) f.free(102, &page{id: 39})
f.release(100) f.release(100)
f.release(101) f.release(101)
assert.Equal(t, []pgid{9, 12, 13}, f.ids) if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
f.release(102) f.release(102)
assert.Equal(t, []pgid{9, 12, 13, 39}, f.ids) if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
} }
// Ensure that a freelist can find contiguous blocks of pages. // Ensure that a freelist can find contiguous blocks of pages.
func TestFreelist_allocate(t *testing.T) { func TestFreelist_allocate(t *testing.T) {
f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
assert.Equal(t, 3, int(f.allocate(3))) if id := int(f.allocate(3)); id != 3 {
assert.Equal(t, 6, int(f.allocate(1))) t.Fatalf("exp=3; got=%v", id)
assert.Equal(t, 0, int(f.allocate(3))) }
assert.Equal(t, 12, int(f.allocate(2))) if id := int(f.allocate(1)); id != 6 {
assert.Equal(t, 7, int(f.allocate(1))) t.Fatalf("exp=6; got=%v", id)
assert.Equal(t, 0, int(f.allocate(0))) }
assert.Equal(t, []pgid{9, 18}, f.ids) if id := int(f.allocate(3)); id != 0 {
assert.Equal(t, 9, int(f.allocate(1))) t.Fatalf("exp=0; got=%v", id)
assert.Equal(t, 18, int(f.allocate(1))) }
assert.Equal(t, 0, int(f.allocate(1))) if id := int(f.allocate(2)); id != 12 {
assert.Equal(t, []pgid{}, f.ids) t.Fatalf("exp=12; got=%v", id)
}
if id := int(f.allocate(1)); id != 7 {
t.Fatalf("exp=7; got=%v", id)
}
if id := int(f.allocate(0)); id != 0 {
t.Fatalf("exp=0; got=%v", id)
}
if id := int(f.allocate(0)); id != 0 {
t.Fatalf("exp=0; got=%v", id)
}
if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
if id := int(f.allocate(1)); id != 9 {
t.Fatalf("exp=9; got=%v", id)
}
if id := int(f.allocate(1)); id != 18 {
t.Fatalf("exp=18; got=%v", id)
}
if id := int(f.allocate(1)); id != 0 {
t.Fatalf("exp=0; got=%v", id)
}
if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) {
t.Fatalf("exp=%v; got=%v", exp, f.ids)
}
} }
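
Note: the repeated allocate checks above could also be driven from a table. A minimal sketch under the same expectations, assuming it lives alongside the other freelist tests in package bolt (the test name and slice are assumptions, not part of this commit):

// Hypothetical table-driven variant of the allocate checks above.
func TestFreelist_allocate_Table(t *testing.T) {
	f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
	allocs := []struct {
		n    int
		want pgid
	}{
		{3, 3}, {1, 6}, {3, 0}, {2, 12}, {1, 7}, {0, 0},
	}
	for i, a := range allocs {
		if id := f.allocate(a.n); id != a.want {
			t.Fatalf("%d: allocate(%d): exp=%d; got=%d", i, a.n, a.want, id)
		}
	}
}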
// Ensure that a freelist can deserialize from a freelist page. // Ensure that a freelist can deserialize from a freelist page.
@ -68,9 +102,9 @@ func TestFreelist_read(t *testing.T) {
f.read(page) f.read(page)
// Ensure that there are two page ids in the freelist. // Ensure that there are two page ids in the freelist.
assert.Equal(t, len(f.ids), 2) if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) {
assert.Equal(t, f.ids[0], pgid(23)) t.Fatalf("exp=%v; got=%v", exp, f.ids)
assert.Equal(t, f.ids[1], pgid(50)) }
} }
// Ensure that a freelist can serialize into a freelist page. // Ensure that a freelist can serialize into a freelist page.
@ -89,10 +123,7 @@ func TestFreelist_write(t *testing.T) {
// Ensure that the freelist is correct. // Ensure that the freelist is correct.
// All pages should be present and in reverse order. // All pages should be present and in reverse order.
assert.Equal(t, len(f2.ids), 5) if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
assert.Equal(t, f2.ids[0], pgid(3)) t.Fatalf("exp=%v; got=%v", exp, f2.ids)
assert.Equal(t, f2.ids[1], pgid(11)) }
assert.Equal(t, f2.ids[2], pgid(12))
assert.Equal(t, f2.ids[3], pgid(28))
assert.Equal(t, f2.ids[4], pgid(39))
} }

node_test.go

@ -3,8 +3,6 @@ package bolt
import ( import (
"testing" "testing"
"unsafe" "unsafe"
"github.com/stretchr/testify/assert"
) )
// Ensure that a node can insert a key/value. // Ensure that a node can insert a key/value.
@ -14,14 +12,22 @@ func TestNode_put(t *testing.T) {
n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)
assert.Equal(t, len(n.inodes), 3)
assert.Equal(t, n.inodes[0].key, []byte("bar")) if len(n.inodes) != 3 {
assert.Equal(t, n.inodes[0].value, []byte("1")) t.Fatalf("exp=3; got=%d", len(n.inodes))
assert.Equal(t, n.inodes[1].key, []byte("baz")) }
assert.Equal(t, n.inodes[1].value, []byte("2")) if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" {
assert.Equal(t, n.inodes[2].key, []byte("foo")) t.Fatalf("exp=<bar,1>; got=<%s,%s>", k, v)
assert.Equal(t, n.inodes[2].value, []byte("3")) }
assert.Equal(t, n.inodes[2].flags, uint32(leafPageFlag)) if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" {
t.Fatalf("exp=<baz,2>; got=<%s,%s>", k, v)
}
if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" {
t.Fatalf("exp=<foo,3>; got=<%s,%s>", k, v)
}
if n.inodes[2].flags != uint32(leafPageFlag) {
t.Fatalf("not a leaf: %d", n.inodes[2].flags)
}
} }
// Ensure that a node can deserialize from a leaf page. // Ensure that a node can deserialize from a leaf page.
@ -47,12 +53,18 @@ func TestNode_read_LeafPage(t *testing.T) {
n.read(page) n.read(page)
// Check that there are two inodes with correct data. // Check that there are two inodes with correct data.
assert.True(t, n.isLeaf) if !n.isLeaf {
assert.Equal(t, len(n.inodes), 2) t.Fatalf("expected leaf; got isLeaf=%v", n.isLeaf)
assert.Equal(t, n.inodes[0].key, []byte("bar")) }
assert.Equal(t, n.inodes[0].value, []byte("fooz")) if len(n.inodes) != 2 {
assert.Equal(t, n.inodes[1].key, []byte("helloworld")) t.Fatalf("exp=2; got=%d", len(n.inodes))
assert.Equal(t, n.inodes[1].value, []byte("bye")) }
if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" {
t.Fatalf("exp=<bar,fooz>; got=<%s,%s>", k, v)
}
if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" {
t.Fatalf("exp=<helloworld,bye>; got=<%s,%s>", k, v)
}
} }
// Ensure that a node can serialize into a leaf page. // Ensure that a node can serialize into a leaf page.
@ -73,13 +85,18 @@ func TestNode_write_LeafPage(t *testing.T) {
n2.read(p) n2.read(p)
// Check that the two pages are the same. // Check that the two pages are the same.
assert.Equal(t, len(n2.inodes), 3) if len(n2.inodes) != 3 {
assert.Equal(t, n2.inodes[0].key, []byte("john")) t.Fatalf("exp=3; got=%d", len(n2.inodes))
assert.Equal(t, n2.inodes[0].value, []byte("johnson")) }
assert.Equal(t, n2.inodes[1].key, []byte("ricki")) if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" {
assert.Equal(t, n2.inodes[1].value, []byte("lake")) t.Fatalf("exp=<john,johnson>; got=<%s,%s>", k, v)
assert.Equal(t, n2.inodes[2].key, []byte("susy")) }
assert.Equal(t, n2.inodes[2].value, []byte("que")) if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" {
t.Fatalf("exp=<ricki,lake>; got=<%s,%s>", k, v)
}
if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" {
t.Fatalf("exp=<susy,que>; got=<%s,%s>", k, v)
}
} }
// Ensure that a node can split into appropriate subgroups. // Ensure that a node can split into appropriate subgroups.
@ -96,9 +113,15 @@ func TestNode_split(t *testing.T) {
n.split(100) n.split(100)
var parent = n.parent var parent = n.parent
assert.Equal(t, len(parent.children), 2) if len(parent.children) != 2 {
assert.Equal(t, len(parent.children[0].inodes), 2) t.Fatalf("exp=2; got=%d", len(parent.children))
assert.Equal(t, len(parent.children[1].inodes), 3) }
if len(parent.children[0].inodes) != 2 {
t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes))
}
if len(parent.children[1].inodes) != 3 {
t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes))
}
} }
// Ensure that a page with the minimum number of inodes just returns a single node. // Ensure that a page with the minimum number of inodes just returns a single node.
@ -110,7 +133,9 @@ func TestNode_split_MinKeys(t *testing.T) {
// Split. // Split.
n.split(20) n.split(20)
assert.Nil(t, n.parent) if n.parent != nil {
t.Fatalf("expected nil parent")
}
} }
// Ensure that a node that has keys that all fit on a page just returns one leaf. // Ensure that a node that has keys that all fit on a page just returns one leaf.
@ -125,5 +150,7 @@ func TestNode_split_SinglePage(t *testing.T) {
// Split. // Split.
n.split(4096) n.split(4096)
assert.Nil(t, n.parent) if n.parent != nil {
t.Fatalf("expected nil parent")
}
} }

page_test.go

@ -1,17 +1,26 @@
package bolt package bolt
import ( import (
"github.com/stretchr/testify/assert"
"testing" "testing"
) )
// Ensure that the page type can be returned in human readable format. // Ensure that the page type can be returned in human readable format.
func TestPage_typ(t *testing.T) { func TestPage_typ(t *testing.T) {
assert.Equal(t, (&page{flags: branchPageFlag}).typ(), "branch") if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" {
assert.Equal(t, (&page{flags: leafPageFlag}).typ(), "leaf") t.Fatalf("exp=branch; got=%v", typ)
assert.Equal(t, (&page{flags: metaPageFlag}).typ(), "meta") }
assert.Equal(t, (&page{flags: freelistPageFlag}).typ(), "freelist") if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" {
assert.Equal(t, (&page{flags: 20000}).typ(), "unknown<4e20>") t.Fatalf("exp=leaf; got=%v", typ)
}
if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" {
t.Fatalf("exp=meta; got=%v", typ)
}
if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" {
t.Fatalf("exp=freelist; got=%v", typ)
}
if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" {
t.Fatalf("exp=unknown<4e20>; got=%v", typ)
}
} }
// Ensure that the hexdump debugging function doesn't blow up. // Ensure that the hexdump debugging function doesn't blow up.

simulation_test.go

@ -8,7 +8,6 @@ import (
"testing" "testing"
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
"github.com/stretchr/testify/assert"
) )
func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) } func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) }
@ -90,7 +89,7 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
versions[tx.ID()] = qdb versions[tx.ID()] = qdb
mutex.Unlock() mutex.Unlock()
assert.NoError(t, tx.Commit()) ok(t, tx.Commit())
}() }()
} else { } else {
defer tx.Rollback() defer tx.Rollback()

tx_test.go

@ -7,7 +7,6 @@ import (
"testing" "testing"
"github.com/boltdb/bolt" "github.com/boltdb/bolt"
"github.com/stretchr/testify/assert"
) )
// Ensure that committing a closed transaction returns an error. // Ensure that committing a closed transaction returns an error.
@ -16,8 +15,8 @@ func TestTx_Commit_Closed(t *testing.T) {
defer db.Close() defer db.Close()
tx, _ := db.Begin(true) tx, _ := db.Begin(true)
tx.CreateBucket([]byte("foo")) tx.CreateBucket([]byte("foo"))
assert.NoError(t, tx.Commit()) ok(t, tx.Commit())
assert.Equal(t, tx.Commit(), bolt.ErrTxClosed) equals(t, tx.Commit(), bolt.ErrTxClosed)
} }
// Ensure that rolling back a closed transaction returns an error. // Ensure that rolling back a closed transaction returns an error.
@ -25,8 +24,8 @@ func TestTx_Rollback_Closed(t *testing.T) {
db := NewTestDB() db := NewTestDB()
defer db.Close() defer db.Close()
tx, _ := db.Begin(true) tx, _ := db.Begin(true)
assert.NoError(t, tx.Rollback()) ok(t, tx.Rollback())
assert.Equal(t, tx.Rollback(), bolt.ErrTxClosed) equals(t, tx.Rollback(), bolt.ErrTxClosed)
} }
// Ensure that committing a read-only transaction returns an error. // Ensure that committing a read-only transaction returns an error.
@ -34,7 +33,7 @@ func TestTx_Commit_ReadOnly(t *testing.T) {
db := NewTestDB() db := NewTestDB()
defer db.Close() defer db.Close()
tx, _ := db.Begin(false) tx, _ := db.Begin(false)
assert.Equal(t, tx.Commit(), bolt.ErrTxNotWritable) equals(t, tx.Commit(), bolt.ErrTxNotWritable)
} }
// Ensure that a transaction can retrieve a cursor on the root bucket. // Ensure that a transaction can retrieve a cursor on the root bucket.
@ -47,16 +46,16 @@ func TestTx_Cursor(t *testing.T) {
c := tx.Cursor() c := tx.Cursor()
k, v := c.First() k, v := c.First()
assert.Equal(t, "widgets", string(k)) equals(t, "widgets", string(k))
assert.Nil(t, v) assert(t, v == nil, "")
k, v = c.Next() k, v = c.Next()
assert.Equal(t, "woojits", string(k)) equals(t, "woojits", string(k))
assert.Nil(t, v) assert(t, v == nil, "")
k, v = c.Next() k, v = c.Next()
assert.Nil(t, k) assert(t, k == nil, "")
assert.Nil(t, v) assert(t, v == nil, "")
return nil return nil
}) })
@ -68,8 +67,8 @@ func TestTx_CreateBucket_ReadOnly(t *testing.T) {
defer db.Close() defer db.Close()
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("foo")) b, err := tx.CreateBucket([]byte("foo"))
assert.Nil(t, b) assert(t, b == nil, "")
assert.Equal(t, bolt.ErrTxNotWritable, err) equals(t, bolt.ErrTxNotWritable, err)
return nil return nil
}) })
} }
@ -81,8 +80,8 @@ func TestTx_CreateBucket_Closed(t *testing.T) {
tx, _ := db.Begin(true) tx, _ := db.Begin(true)
tx.Commit() tx.Commit()
b, err := tx.CreateBucket([]byte("foo")) b, err := tx.CreateBucket([]byte("foo"))
assert.Nil(t, b) assert(t, b == nil, "")
assert.Equal(t, bolt.ErrTxClosed, err) equals(t, bolt.ErrTxClosed, err)
} }
// Ensure that a Tx can retrieve a bucket. // Ensure that a Tx can retrieve a bucket.
@ -92,7 +91,7 @@ func TestTx_Bucket(t *testing.T) {
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
return nil return nil
}) })
} }
@ -105,7 +104,7 @@ func TestTx_Get_Missing(t *testing.T) {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key")) value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
assert.Nil(t, value) assert(t, value == nil, "")
return nil return nil
}) })
} }
@ -118,15 +117,15 @@ func TestTx_CreateBucket(t *testing.T) {
// Create a bucket. // Create a bucket.
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
assert.NoError(t, err) ok(t, err)
return nil return nil
}) })
// Read the bucket through a separate transaction. // Read the bucket through a separate transaction.
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
return nil return nil
}) })
} }
@ -137,27 +136,27 @@ func TestTx_CreateBucketIfNotExists(t *testing.T) {
defer db.Close() defer db.Close()
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("widgets")) b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
assert.NoError(t, err) ok(t, err)
b, err = tx.CreateBucketIfNotExists([]byte("widgets")) b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
assert.NoError(t, err) ok(t, err)
b, err = tx.CreateBucketIfNotExists([]byte{}) b, err = tx.CreateBucketIfNotExists([]byte{})
assert.Nil(t, b) assert(t, b == nil, "")
assert.Equal(t, bolt.ErrBucketNameRequired, err) equals(t, bolt.ErrBucketNameRequired, err)
b, err = tx.CreateBucketIfNotExists(nil) b, err = tx.CreateBucketIfNotExists(nil)
assert.Nil(t, b) assert(t, b == nil, "")
assert.Equal(t, bolt.ErrBucketNameRequired, err) equals(t, bolt.ErrBucketNameRequired, err)
return nil return nil
}) })
// Read the bucket through a separate transaction. // Read the bucket through a separate transaction.
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("widgets")) b := tx.Bucket([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
return nil return nil
}) })
} }
@ -169,16 +168,16 @@ func TestTx_CreateBucket_Exists(t *testing.T) {
// Create a bucket. // Create a bucket.
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
assert.NoError(t, err) ok(t, err)
return nil return nil
}) })
// Create the same bucket again. // Create the same bucket again.
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
assert.Nil(t, b) assert(t, b == nil, "")
assert.Equal(t, bolt.ErrBucketExists, err) equals(t, bolt.ErrBucketExists, err)
return nil return nil
}) })
} }
@ -189,8 +188,8 @@ func TestTx_CreateBucket_NameRequired(t *testing.T) {
defer db.Close() defer db.Close()
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket(nil) b, err := tx.CreateBucket(nil)
assert.Nil(t, b) assert(t, b == nil, "")
assert.Equal(t, bolt.ErrBucketNameRequired, err) equals(t, bolt.ErrBucketNameRequired, err)
return nil return nil
}) })
} }
@ -209,17 +208,17 @@ func TestTx_DeleteBucket(t *testing.T) {
// Delete the bucket and make sure we can't get the value. // Delete the bucket and make sure we can't get the value.
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
assert.NoError(t, tx.DeleteBucket([]byte("widgets"))) ok(t, tx.DeleteBucket([]byte("widgets")))
assert.Nil(t, tx.Bucket([]byte("widgets"))) assert(t, tx.Bucket([]byte("widgets")) == nil, "")
return nil return nil
}) })
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
// Create the bucket again and make sure there's not a phantom value. // Create the bucket again and make sure there's not a phantom value.
b, err := tx.CreateBucket([]byte("widgets")) b, err := tx.CreateBucket([]byte("widgets"))
assert.NotNil(t, b) assert(t, b != nil, "")
assert.NoError(t, err) ok(t, err)
assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo"))) assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
return nil return nil
}) })
} }
@ -230,7 +229,7 @@ func TestTx_DeleteBucket_Closed(t *testing.T) {
defer db.Close() defer db.Close()
tx, _ := db.Begin(true) tx, _ := db.Begin(true)
tx.Commit() tx.Commit()
assert.Equal(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed) equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed)
} }
// Ensure that deleting a bucket with a read-only transaction returns an error. // Ensure that deleting a bucket with a read-only transaction returns an error.
@ -238,7 +237,7 @@ func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
db := NewTestDB() db := NewTestDB()
defer db.Close() defer db.Close()
db.View(func(tx *bolt.Tx) error { db.View(func(tx *bolt.Tx) error {
assert.Equal(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable) equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable)
return nil return nil
}) })
} }
@ -248,7 +247,7 @@ func TestTx_DeleteBucket_NotFound(t *testing.T) {
db := NewTestDB() db := NewTestDB()
defer db.Close() defer db.Close()
db.Update(func(tx *bolt.Tx) error { db.Update(func(tx *bolt.Tx) error {
assert.Equal(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets"))) equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
return nil return nil
}) })
} }
@ -264,7 +263,7 @@ func TestTx_OnCommit(t *testing.T) {
_, err := tx.CreateBucket([]byte("widgets")) _, err := tx.CreateBucket([]byte("widgets"))
return err return err
}) })
assert.Equal(t, 3, x) equals(t, 3, x)
} }
// Ensure that Tx commit handlers are NOT called after a transaction rolls back. // Ensure that Tx commit handlers are NOT called after a transaction rolls back.
@ -278,7 +277,7 @@ func TestTx_OnCommit_Rollback(t *testing.T) {
tx.CreateBucket([]byte("widgets")) tx.CreateBucket([]byte("widgets"))
return errors.New("rollback this commit") return errors.New("rollback this commit")
}) })
assert.Equal(t, 0, x) equals(t, 0, x)
} }
// Ensure that the database can be copied to a file path. // Ensure that the database can be copied to a file path.
@ -293,15 +292,15 @@ func TestTx_CopyFile(t *testing.T) {
return nil return nil
}) })
assert.NoError(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) })) ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) }))
db2, err := bolt.Open(dest, 0600, nil) db2, err := bolt.Open(dest, 0600, nil)
assert.NoError(t, err) ok(t, err)
defer db2.Close() defer db2.Close()
db2.View(func(tx *bolt.Tx) error { db2.View(func(tx *bolt.Tx) error {
assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo"))) equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
return nil return nil
}) })
} }
@ -339,7 +338,7 @@ func TestTx_CopyFile_Error_Meta(t *testing.T) {
}) })
err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) }) err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) })
assert.EqualError(t, err, "meta copy: error injected for tests") equals(t, err.Error(), "meta copy: error injected for tests")
} }
// Ensure that Copy handles write errors right. // Ensure that Copy handles write errors right.
@ -354,7 +353,7 @@ func TestTx_CopyFile_Error_Normal(t *testing.T) {
}) })
err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) }) err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) })
assert.EqualError(t, err, "error injected for tests") equals(t, err.Error(), "error injected for tests")
} }
func ExampleTx_Rollback() { func ExampleTx_Rollback() {