Remove wrapping test closures.

pull/34/head
Ben Johnson 2014-07-26 14:44:04 -06:00
parent 06222e06de
commit ca2339d7cb
5 changed files with 1496 additions and 1461 deletions

View File

@ -17,19 +17,20 @@ import (
// Ensure that a bucket that gets a non-existent key returns nil.
func TestBucket_Get_NonExistent(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
assert.Nil(t, value)
return nil
})
})
}
// Ensure that a bucket can read a value that is not flushed yet.
func TestBucket_Get_FromNode(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
@ -38,12 +39,12 @@ func TestBucket_Get_FromNode(t *testing.T) {
assert.Equal(t, value, []byte("bar"))
return nil
})
})
}
// Ensure that a bucket retrieved via Get() returns a nil.
func TestBucket_Get_IncompatibleValue(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
@ -51,12 +52,12 @@ func TestBucket_Get_IncompatibleValue(t *testing.T) {
assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
return nil
})
})
}
// Ensure that a bucket can write a key/value.
func TestBucket_Put(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
@ -65,12 +66,12 @@ func TestBucket_Put(t *testing.T) {
assert.Equal(t, value, []byte("bar"))
return nil
})
})
}
// Ensure that a bucket can rewrite a key in the same transaction.
func TestBucket_Put_Repeat(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
@ -80,14 +81,14 @@ func TestBucket_Put_Repeat(t *testing.T) {
assert.Equal(t, value, []byte("baz"))
return nil
})
})
}
// Ensure that a bucket can write a bunch of large values.
func TestBucket_Put_Large(t *testing.T) {
var count = 100
var factor = 200
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
count, factor := 100, 200
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
@ -104,7 +105,6 @@ func TestBucket_Put_Large(t *testing.T) {
}
return nil
})
})
}
// Ensure that a database can perform multiple large appends safely.
@ -116,7 +116,9 @@ func TestDB_Put_VeryLarge(t *testing.T) {
n, batchN := 400000, 200000
ksize, vsize := 8, 500
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
for i := 0; i < n; i += batchN {
err := db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucketIfNotExists([]byte("widgets"))
@ -129,12 +131,12 @@ func TestDB_Put_VeryLarge(t *testing.T) {
})
assert.NoError(t, err)
}
})
}
// Ensure that a setting a value on a key with a bucket value returns an error.
func TestBucket_Put_IncompatibleValue(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
@ -142,23 +144,23 @@ func TestBucket_Put_IncompatibleValue(t *testing.T) {
assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
return nil
})
})
}
// Ensure that a setting a value while the transaction is closed returns an error.
func TestBucket_Put_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
tx.Rollback()
assert.Equal(t, ErrTxClosed, b.Put([]byte("foo"), []byte("bar")))
})
}
// Ensure that setting a value on a read-only bucket returns an error.
func TestBucket_Put_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err)
@ -170,12 +172,12 @@ func TestBucket_Put_ReadOnly(t *testing.T) {
assert.Equal(t, err, ErrTxNotWritable)
return nil
})
})
}
// Ensure that a bucket can delete an existing key.
func TestBucket_Delete(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
@ -185,12 +187,12 @@ func TestBucket_Delete(t *testing.T) {
assert.Nil(t, value)
return nil
})
})
}
// Ensure that deleting a large set of keys will work correctly.
func TestBucket_Delete_Large(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
var b, _ = tx.CreateBucket([]byte("widgets"))
for i := 0; i < 100; i++ {
@ -212,7 +214,6 @@ func TestBucket_Delete_Large(t *testing.T) {
}
return nil
})
})
}
// Deleting a very large list of keys will cause the freelist to use overflow.
@ -221,7 +222,8 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
t.Skip("skipping test in short mode.")
}
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
k := make([]byte, 16)
for i := uint64(0); i < 10000; i++ {
err := db.Update(func(tx *Tx) error {
@ -258,12 +260,12 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
// Check that a freelist overflow occurred.
assert.NoError(t, err)
})
}
// Ensure that accessing and updating nested buckets is ok across transactions.
func TestBucket_Nested(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
// Create a widgets bucket.
b, err := tx.CreateBucket([]byte("widgets"))
@ -278,7 +280,7 @@ func TestBucket_Nested(t *testing.T) {
return nil
})
mustCheck(db)
db.MustCheck()
// Update widgets/bar.
db.Update(func(tx *Tx) error {
@ -286,7 +288,7 @@ func TestBucket_Nested(t *testing.T) {
assert.NoError(t, b.Put([]byte("bar"), []byte("xxxx")))
return nil
})
mustCheck(db)
db.MustCheck()
// Cause a split.
db.Update(func(tx *Tx) error {
@ -296,7 +298,7 @@ func TestBucket_Nested(t *testing.T) {
}
return nil
})
mustCheck(db)
db.MustCheck()
// Insert into widgets/foo/baz.
db.Update(func(tx *Tx) error {
@ -304,7 +306,7 @@ func TestBucket_Nested(t *testing.T) {
assert.NoError(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")))
return nil
})
mustCheck(db)
db.MustCheck()
// Verify.
db.View(func(tx *Tx) error {
@ -316,12 +318,12 @@ func TestBucket_Nested(t *testing.T) {
}
return nil
})
})
}
// Ensure that deleting a bucket using Delete() returns an error.
func TestBucket_Delete_Bucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
@ -330,12 +332,12 @@ func TestBucket_Delete_Bucket(t *testing.T) {
assert.Equal(t, ErrIncompatibleValue, b.Delete([]byte("foo")))
return nil
})
})
}
// Ensure that deleting a key on a read-only bucket returns an error.
func TestBucket_Delete_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
return nil
@ -346,23 +348,23 @@ func TestBucket_Delete_ReadOnly(t *testing.T) {
assert.Equal(t, err, ErrTxNotWritable)
return nil
})
})
}
// Ensure that a deleting value while the transaction is closed returns an error.
func TestBucket_Delete_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
tx.Rollback()
assert.Equal(t, ErrTxClosed, b.Delete([]byte("foo")))
})
}
// Ensure that deleting a bucket causes nested buckets to be deleted.
func TestBucket_DeleteBucket_Nested(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
@ -373,12 +375,12 @@ func TestBucket_DeleteBucket_Nested(t *testing.T) {
assert.NoError(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
return nil
})
})
}
// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
func TestBucket_DeleteBucket_Nested2(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
@ -400,12 +402,12 @@ func TestBucket_DeleteBucket_Nested2(t *testing.T) {
assert.Nil(t, tx.Bucket([]byte("widgets")))
return nil
})
})
}
// Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
func TestBucket_DeleteBucket_Large(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err)
@ -422,25 +424,25 @@ func TestBucket_DeleteBucket_Large(t *testing.T) {
return nil
})
// NOTE: Consistency check in withOpenDB() will error if pages not freed properly.
})
// NOTE: Consistency check in TestDB.Close() will panic if pages not freed properly.
}
// Ensure that a simple value retrieved via Bucket() returns a nil.
func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
assert.Nil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")))
return nil
})
})
}
// Ensure that creating a bucket on an existing non-bucket key returns an error.
func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err)
@ -449,12 +451,12 @@ func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
assert.Equal(t, ErrIncompatibleValue, err)
return nil
})
})
}
// Ensure that deleting a bucket on an existing non-bucket key returns an error.
func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err)
@ -462,12 +464,12 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
return nil
})
})
}
// Ensure that a bucket can return an autoincrementing sequence.
func TestBucket_NextSequence(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.CreateBucket([]byte("woojits"))
@ -486,12 +488,12 @@ func TestBucket_NextSequence(t *testing.T) {
assert.Equal(t, seq, uint64(1))
return nil
})
})
}
// Ensure that retrieving the next sequence on a read-only bucket returns an error.
func TestBucket_NextSequence_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
return nil
@ -503,24 +505,24 @@ func TestBucket_NextSequence_ReadOnly(t *testing.T) {
assert.Equal(t, err, ErrTxNotWritable)
return nil
})
})
}
// Ensure that retrieving the next sequence for a bucket on a closed database return an error.
func TestBucket_NextSequence_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
tx.Rollback()
_, err := b.NextSequence()
assert.Equal(t, ErrTxClosed, err)
})
}
// Ensure a user can loop over all key/value pairs in a bucket.
func TestBucket_ForEach(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
@ -547,12 +549,12 @@ func TestBucket_ForEach(t *testing.T) {
assert.Equal(t, index, 3)
return nil
})
})
}
// Ensure a database can stop iteration early.
func TestBucket_ForEach_ShortCircuit(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000"))
@ -571,24 +573,24 @@ func TestBucket_ForEach_ShortCircuit(t *testing.T) {
assert.Equal(t, 2, index)
return nil
})
})
}
// Ensure that looping over a bucket on a closed database returns an error.
func TestBucket_ForEach_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
tx.Rollback()
err := b.ForEach(func(k, v []byte) error { return nil })
assert.Equal(t, ErrTxClosed, err)
})
}
// Ensure that an error is returned when inserting with an empty key.
func TestBucket_Put_EmptyKey(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar"))
@ -597,24 +599,25 @@ func TestBucket_Put_EmptyKey(t *testing.T) {
assert.Equal(t, err, ErrKeyRequired)
return nil
})
})
}
// Ensure that an error is returned when inserting with a key that's too large.
func TestBucket_Put_KeyTooLarge(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar"))
assert.Equal(t, err, ErrKeyTooLarge)
return nil
})
})
}
// Ensure a bucket can calculate stats.
func TestBucket_Stats(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
// Add bucket with fewer keys but one big value.
big_key := []byte("really-big-value")
for i := 0; i < 500; i++ {
@ -628,7 +631,7 @@ func TestBucket_Stats(t *testing.T) {
return b.Put(big_key, []byte(strings.Repeat("*", 10000)))
})
mustCheck(db)
db.MustCheck()
db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("woojits"))
stats := b.Stats()
@ -661,19 +664,19 @@ func TestBucket_Stats(t *testing.T) {
assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse")
return nil
})
})
}
// Ensure a bucket with random insertion utilizes fill percentage correctly.
func TestBucket_Stats_RandomFill(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
if os.Getpagesize() != 4096 {
} else if os.Getpagesize() != 4096 {
t.Skip("invalid page size for test")
}
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
// Add a set of values in random order. It will be the same random
// order so we can maintain consistency between test runs.
var count int
@ -690,7 +693,7 @@ func TestBucket_Stats_RandomFill(t *testing.T) {
return nil
})
}
mustCheck(db)
db.MustCheck()
db.View(func(tx *Tx) error {
s := tx.Bucket([]byte("woojits")).Stats()
@ -707,13 +710,12 @@ func TestBucket_Stats_RandomFill(t *testing.T) {
assert.Equal(t, 13975552, s.LeafAlloc, "LeafAlloc")
return nil
})
})
}
// Ensure a bucket can calculate stats.
func TestBucket_Stats_Small(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
// Add a bucket that fits on a single root leaf.
b, err := tx.CreateBucket([]byte("whozawhats"))
@ -722,7 +724,7 @@ func TestBucket_Stats_Small(t *testing.T) {
return nil
})
mustCheck(db)
db.MustCheck()
db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("whozawhats"))
stats := b.Stats()
@ -744,19 +746,19 @@ func TestBucket_Stats_Small(t *testing.T) {
assert.Equal(t, pageHeaderSize+leafPageElementSize+6, stats.InlineBucketInuse, "InlineBucketInuse")
return nil
})
})
}
func TestBucket_Stats_EmptyBucket(t *testing.T) {
db := NewTestDB()
defer db.Close()
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
// Add a bucket that fits on a single root leaf.
_, err := tx.CreateBucket([]byte("whozawhats"))
assert.NoError(t, err)
return nil
})
mustCheck(db)
db.MustCheck()
db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("whozawhats"))
stats := b.Stats()
@ -778,12 +780,13 @@ func TestBucket_Stats_EmptyBucket(t *testing.T) {
assert.Equal(t, pageHeaderSize, stats.InlineBucketInuse, "InlineBucketInuse")
return nil
})
})
}
// Ensure a bucket can calculate stats.
func TestBucket_Stats_Nested(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("foo"))
assert.NoError(t, err)
@ -803,7 +806,7 @@ func TestBucket_Stats_Nested(t *testing.T) {
return nil
})
mustCheck(db)
db.MustCheck()
db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("foo"))
@ -841,7 +844,6 @@ func TestBucket_Stats_Nested(t *testing.T) {
assert.Equal(t, baz, stats.InlineBucketInuse, "InlineBucketInuse")
return nil
})
})
}
// Ensure a large bucket can calculate stats.
@ -850,7 +852,9 @@ func TestBucket_Stats_Large(t *testing.T) {
t.Skip("skipping test in short mode.")
}
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
var index int
for i := 0; i < 100; i++ {
db.Update(func(tx *Tx) error {
@ -863,7 +867,7 @@ func TestBucket_Stats_Large(t *testing.T) {
return nil
})
}
mustCheck(db)
db.MustCheck()
db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
@ -886,7 +890,6 @@ func TestBucket_Stats_Large(t *testing.T) {
assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse")
return nil
})
})
}
// Ensure that a bucket can write random keys and values across multiple transactions.
@ -897,7 +900,9 @@ func TestBucket_Put_Single(t *testing.T) {
index := 0
f := func(items testdata) bool {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
m := make(map[string][]byte)
db.Update(func(tx *Tx) error {
@ -920,14 +925,14 @@ func TestBucket_Put_Single(t *testing.T) {
value := tx.Bucket([]byte("widgets")).Get([]byte(k))
if !bytes.Equal(value, v) {
t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
copyAndFailNow(t, db)
db.CopyTempFile()
t.FailNow()
}
i++
}
return nil
})
}
})
index++
return true
@ -944,7 +949,8 @@ func TestBucket_Put_Multiple(t *testing.T) {
}
f := func(items testdata) bool {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
// Bulk insert all values.
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
@ -965,12 +971,12 @@ func TestBucket_Put_Multiple(t *testing.T) {
for _, item := range items {
value := b.Get(item.Key)
if !assert.Equal(t, item.Value, value) {
copyAndFailNow(t, db)
db.CopyTempFile()
t.FailNow()
}
}
return nil
})
})
return true
}
if err := quick.Check(f, qconfig()); err != nil {
@ -985,7 +991,8 @@ func TestBucket_Delete_Quick(t *testing.T) {
}
f := func(items testdata) bool {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
// Bulk insert all values.
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
@ -1016,7 +1023,6 @@ func TestBucket_Delete_Quick(t *testing.T) {
})
return nil
})
})
return true
}
if err := quick.Check(f, qconfig()); err != nil {

View File

@ -12,19 +12,20 @@ import (
// Ensure that a cursor can return a reference to the bucket that created it.
func TestCursor_Bucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucket([]byte("widgets"))
c := b.Cursor()
assert.Equal(t, b, c.Bucket())
return nil
})
})
}
// Ensure that a Tx cursor can seek to the appropriate keys.
func TestCursor_Seek(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err)
@ -65,11 +66,12 @@ func TestCursor_Seek(t *testing.T) {
return nil
})
})
}
func TestCursor_Delete(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
var count = 1000
// Insert every other key between 0 and $count.
@ -104,7 +106,6 @@ func TestCursor_Delete(t *testing.T) {
assert.Equal(t, b.Stats().KeyN, count/2+1)
return nil
})
})
}
// Ensure that a Tx cursor can seek to the appropriate keys when there are a
@ -113,7 +114,9 @@ func TestCursor_Delete(t *testing.T) {
//
// Related: https://github.com/boltdb/bolt/pull/187
func TestCursor_Seek_Large(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
var count = 10000
// Insert every other key between 0 and $count.
@ -155,12 +158,12 @@ func TestCursor_Seek_Large(t *testing.T) {
return nil
})
})
}
// Ensure that a cursor can iterate over an empty bucket without error.
func TestCursor_EmptyBucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
@ -172,12 +175,13 @@ func TestCursor_EmptyBucket(t *testing.T) {
assert.Nil(t, v)
return nil
})
})
}
// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
func TestCursor_EmptyBucketReverse(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
@ -189,12 +193,13 @@ func TestCursor_EmptyBucketReverse(t *testing.T) {
assert.Nil(t, v)
return nil
})
})
}
// Ensure that a Tx cursor can iterate over a single root with a couple elements.
func TestCursor_Iterate_Leaf(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
@ -226,12 +231,13 @@ func TestCursor_Iterate_Leaf(t *testing.T) {
assert.Nil(t, v)
tx.Rollback()
})
}
// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
func TestCursor_LeafRootReverse(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
@ -263,12 +269,13 @@ func TestCursor_LeafRootReverse(t *testing.T) {
assert.Nil(t, v)
tx.Rollback()
})
}
// Ensure that a Tx cursor can restart from the beginning.
func TestCursor_Restart(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
@ -292,13 +299,14 @@ func TestCursor_Restart(t *testing.T) {
assert.Equal(t, string(k), "foo")
tx.Rollback()
})
}
// Ensure that a Tx can iterate over all elements in a bucket.
func TestCursor_QuickCheck(t *testing.T) {
f := func(items testdata) bool {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
// Bulk insert all values.
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
@ -322,7 +330,7 @@ func TestCursor_QuickCheck(t *testing.T) {
}
assert.Equal(t, len(items), index)
tx.Rollback()
})
return true
}
if err := quick.Check(f, qconfig()); err != nil {
@ -333,7 +341,9 @@ func TestCursor_QuickCheck(t *testing.T) {
// Ensure that a transaction can iterate over all elements in a bucket in reverse.
func TestCursor_QuickCheck_Reverse(t *testing.T) {
f := func(items testdata) bool {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
// Bulk insert all values.
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("widgets"))
@ -357,7 +367,7 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
}
assert.Equal(t, len(items), index)
tx.Rollback()
})
return true
}
if err := quick.Check(f, qconfig()); err != nil {
@ -367,7 +377,9 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
// Ensure that a Tx cursor can iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err)
@ -389,12 +401,13 @@ func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
assert.Equal(t, names, []string{"bar", "baz", "foo"})
return nil
})
})
}
// Ensure that a Tx cursor can reverse iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
assert.NoError(t, err)
@ -416,5 +429,4 @@ func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
assert.Equal(t, names, []string{"foo", "baz", "bar"})
return nil
})
})
}

View File

@ -206,14 +206,14 @@ func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
// Ensure that a read-write transaction can be retrieved.
func TestDB_BeginRW(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
tx, err := db.Begin(true)
assert.NotNil(t, tx)
assert.NoError(t, err)
assert.Equal(t, tx.DB(), db)
assert.Equal(t, tx.Writable(), true)
assert.NoError(t, tx.Commit())
})
}
// Ensure that opening a transaction while the DB is closed returns an error.
@ -226,7 +226,8 @@ func TestDB_BeginRW_Closed(t *testing.T) {
// Ensure a database can provide a transactional block.
func TestDB_Update(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
err := db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
@ -242,7 +243,6 @@ func TestDB_Update(t *testing.T) {
return nil
})
assert.NoError(t, err)
})
}
// Ensure a closed database returns an error while running a transaction block
@ -273,7 +273,9 @@ func TestDB_Update_ManualCommitAndRollback(t *testing.T) {
// Ensure a write transaction that panics does not hold open locks.
func TestDB_Update_Panic(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
func() {
defer func() {
if r := recover(); r != nil {
@ -298,22 +300,22 @@ func TestDB_Update_Panic(t *testing.T) {
assert.NotNil(t, tx.Bucket([]byte("widgets")))
return nil
})
})
}
// Ensure a database can return an error through a read-only transactional block.
func TestDB_View_Error(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
err := db.View(func(tx *Tx) error {
return errors.New("xxx")
})
assert.Equal(t, errors.New("xxx"), err)
})
}
// Ensure a read transaction that panics does not hold open locks.
func TestDB_View_Panic(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
return nil
@ -336,7 +338,6 @@ func TestDB_View_Panic(t *testing.T) {
assert.NotNil(t, tx.Bucket([]byte("widgets")))
return nil
})
})
}
// Ensure that an error is returned when a database write fails.
@ -346,7 +347,8 @@ func TestDB_Commit_WriteFail(t *testing.T) {
// Ensure that DB stats can be returned.
func TestDB_Stats(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
@ -355,7 +357,6 @@ func TestDB_Stats(t *testing.T) {
assert.Equal(t, 2, stats.TxStats.PageCount, "PageCount")
assert.Equal(t, 0, stats.FreePageN, "FreePageN")
assert.Equal(t, 2, stats.PendingPageN, "PendingPageN")
})
}
// Ensure that the mmap grows appropriately.
@ -373,7 +374,8 @@ func TestDB_mmapSize(t *testing.T) {
// Ensure that database pages are in expected order and type.
func TestDB_Consistency(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
@ -408,7 +410,6 @@ func TestDB_Consistency(t *testing.T) {
assert.Nil(t, p)
return nil
})
})
}
// Ensure that a database can return a string representation of itself.
@ -451,7 +452,9 @@ func TestDB_StrictMode(t *testing.T) {
msg = fmt.Sprintf("%s", recover())
}()
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.StrictMode = true
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("foo"))
@ -461,7 +464,6 @@ func TestDB_StrictMode(t *testing.T) {
return nil
})
})
}()
assert.Equal(t, "check fail: page 4: unreachable unfreed", msg)
@ -474,7 +476,11 @@ func TestDB_DoubleFree(t *testing.T) {
defer func() {
msg = fmt.Sprintf("%s", recover())
}()
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer os.Remove(db.DB.Path())
defer db.DB.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("foo"))
@ -483,7 +489,6 @@ func TestDB_DoubleFree(t *testing.T) {
return nil
})
})
}()
assert.Equal(t, "assertion failed: page 3 already freed", msg)
@ -580,37 +585,53 @@ func ExampleDB_Begin_ReadOnly() {
// zephyr likes purple
}
// tempfile returns a temporary file path.
func tempfile() string {
f, _ := ioutil.TempFile("", "bolt-")
f.Close()
os.Remove(f.Name())
return f.Name()
// TestDB represents a wrapper around a Bolt DB to handle temporary file
// creation and automatic cleanup on close.
type TestDB struct {
*DB
}
// withOpenDB executes a function with an already opened database.
func withOpenDB(fn func(*DB, string)) {
path := tempfile()
defer os.Remove(path)
db, err := Open(path, 0666, nil)
// NewTestDB returns a new instance of TestDB.
func NewTestDB() *TestDB {
db, err := Open(tempfile(), 0666, nil)
if err != nil {
panic("cannot open db: " + err.Error())
}
defer db.Close()
fn(db, path)
return &TestDB{db}
}
// Close closes the database and deletes the underlying file.
func (db *TestDB) Close() {
// Log statistics.
if *statsFlag {
logStats(db)
db.PrintStats()
}
// Check database consistency after every test.
mustCheck(db)
db.MustCheck()
// Close database and remove file.
defer os.Remove(db.Path())
db.DB.Close()
}
// mustCheck runs a consistency check on the database and panics if any errors are found.
func mustCheck(db *DB) {
// PrintStats prints the database stats
func (db *TestDB) PrintStats() {
var stats = db.Stats()
fmt.Printf("[db] %-20s %-20s %-20s\n",
fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
)
fmt.Printf(" %-20s %-20s %-20s\n",
fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
)
}
// MustCheck runs a consistency check on the database and panics if any errors are found.
func (db *TestDB) MustCheck() {
db.View(func(tx *Tx) error {
// Collect all the errors.
var errors []error
@ -643,6 +664,21 @@ func mustCheck(db *DB) {
})
}
// CopyTempFile copies a database to a temporary file.
func (db *TestDB) CopyTempFile() {
path := tempfile()
db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) })
fmt.Println("db copied to: ", path)
}
// tempfile returns a temporary file path.
func tempfile() string {
f, _ := ioutil.TempFile("", "bolt-")
f.Close()
os.Remove(f.Name())
return f.Name()
}
// mustContainKeys checks that a bucket contains a given set of keys.
func mustContainKeys(b *Bucket, m map[string]string) {
found := make(map[string]string)
@ -682,29 +718,6 @@ func trunc(b []byte, length int) []byte {
return b
}
// logStats writes the current database stats to standard output.
func logStats(db *DB) {
	txStats := db.Stats().TxStats
	fmt.Printf("[db] %-20s %-20s %-20s\n",
		fmt.Sprintf("pg(%d/%d)", txStats.PageCount, txStats.PageAlloc),
		fmt.Sprintf("cur(%d)", txStats.CursorCount),
		fmt.Sprintf("node(%d/%d)", txStats.NodeCount, txStats.NodeDeref))
	fmt.Printf(" %-20s %-20s %-20s\n",
		fmt.Sprintf("rebal(%d/%v)", txStats.Rebalance, truncDuration(txStats.RebalanceTime)),
		fmt.Sprintf("spill(%d/%v)", txStats.Spill, truncDuration(txStats.SpillTime)),
		fmt.Sprintf("w(%d/%v)", txStats.Write, truncDuration(txStats.WriteTime)))
}
// truncExp matches the leading integer part of a duration string
// together with its fractional digits (e.g. "1.5" in "1.5s").
// Compiled once at package scope to avoid recompiling per call.
var truncExp = regexp.MustCompile(`^(\d+)(\.\d+)`)

// truncDuration formats d with any leading fractional component
// stripped (e.g. 1.5s -> "1s"); other durations pass through unchanged.
func truncDuration(d time.Duration) string {
	return truncExp.ReplaceAllString(d.String(), "$1")
}
// copyAndFailNow copies the database to a temporary file, prints the
// copy's location, and then fails the test immediately.
func copyAndFailNow(t *testing.T, db *DB) {
	path := tempfile()
	if err := db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) }); err != nil {
		// Surface the copy error before failing so its cause is not lost.
		t.Errorf("copy failed: %v", err)
	}
	fmt.Println("db copied to: ", path)
	t.FailNow()
}

View File

@ -41,7 +41,10 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
var versions = make(map[txid]*QuickDB)
versions[1] = NewQuickDB()
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
var mutex sync.Mutex
// Run n threads in parallel, each with their own operation.
@ -112,7 +115,6 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
// Wait until all threads are done.
wg.Wait()
})
}
type simulateHandler func(tx *Tx, qdb *QuickDB)

View File

@ -11,34 +11,35 @@ import (
// Ensure that committing a closed transaction returns an error.
func TestTx_Commit_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
tx.CreateBucket([]byte("foo"))
assert.NoError(t, tx.Commit())
assert.Equal(t, tx.Commit(), ErrTxClosed)
})
}
// Ensure that rolling back a closed transaction returns an error.
func TestTx_Rollback_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
assert.NoError(t, tx.Rollback())
assert.Equal(t, tx.Rollback(), ErrTxClosed)
})
}
// Ensure that committing a read-only transaction returns an error.
func TestTx_Commit_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(false)
assert.Equal(t, tx.Commit(), ErrTxNotWritable)
})
}
// Ensure that a transaction can retrieve a cursor on the root bucket.
func TestTx_Cursor(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.CreateBucket([]byte("woojits"))
@ -58,47 +59,47 @@ func TestTx_Cursor(t *testing.T) {
return nil
})
})
}
// Ensure that creating a bucket with a read-only transaction returns an error.
func TestTx_CreateBucket_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.View(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("foo"))
assert.Nil(t, b)
assert.Equal(t, ErrTxNotWritable, err)
return nil
})
})
}
// Ensure that creating a bucket on a closed transaction returns an error.
func TestTx_CreateBucket_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
tx.Commit()
b, err := tx.CreateBucket([]byte("foo"))
assert.Nil(t, b)
assert.Equal(t, ErrTxClosed, err)
})
}
// Ensure that a Tx can retrieve a bucket.
func TestTx_Bucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
b := tx.Bucket([]byte("widgets"))
assert.NotNil(t, b)
return nil
})
})
}
// Ensure that a Tx retrieving a non-existent key returns nil.
func TestTx_Get_Missing(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
@ -106,12 +107,13 @@ func TestTx_Get_Missing(t *testing.T) {
assert.Nil(t, value)
return nil
})
})
}
// Ensure that a bucket can be created and retrieved.
func TestTx_CreateBucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
// Create a bucket.
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
@ -126,12 +128,12 @@ func TestTx_CreateBucket(t *testing.T) {
assert.NotNil(t, b)
return nil
})
})
}
// Ensure that a bucket can be created if it doesn't already exist.
func TestTx_CreateBucketIfNotExists(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
assert.NotNil(t, b)
@ -157,12 +159,12 @@ func TestTx_CreateBucketIfNotExists(t *testing.T) {
assert.NotNil(t, b)
return nil
})
})
}
// Ensure that a bucket cannot be created twice.
func TestTx_CreateBucket_Exists(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
// Create a bucket.
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
@ -178,24 +180,25 @@ func TestTx_CreateBucket_Exists(t *testing.T) {
assert.Equal(t, ErrBucketExists, err)
return nil
})
})
}
// Ensure that a bucket is created with a non-blank name.
func TestTx_CreateBucket_NameRequired(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket(nil)
assert.Nil(t, b)
assert.Equal(t, ErrBucketNameRequired, err)
return nil
})
})
}
// Ensure that a bucket can be deleted.
func TestTx_DeleteBucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
// Create a bucket and add a value.
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
@ -228,69 +231,69 @@ func TestTx_DeleteBucket(t *testing.T) {
assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
return nil
})
})
}
// Ensure that deleting a bucket on a closed transaction returns an error.
func TestTx_DeleteBucket_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
tx, _ := db.Begin(true)
tx.Commit()
assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxClosed)
})
}
// Ensure that deleting a bucket with a read-only transaction returns an error.
func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.View(func(tx *Tx) error {
assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxNotWritable)
return nil
})
})
}
// Ensure that nothing happens when deleting a bucket that doesn't exist.
func TestTx_DeleteBucket_NotFound(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
return nil
})
})
}
// Ensure that Tx commit handlers are called after a transaction successfully commits.
func TestTx_OnCommit(t *testing.T) {
var x int
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.OnCommit(func() { x += 1 })
tx.OnCommit(func() { x += 2 })
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
})
assert.Equal(t, 3, x)
}
// Ensure that Tx commit handlers are NOT called after a transaction rolls back.
func TestTx_OnCommit_Rollback(t *testing.T) {
var x int
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.OnCommit(func() { x += 1 })
tx.OnCommit(func() { x += 2 })
tx.CreateBucket([]byte("widgets"))
return errors.New("rollback this commit")
})
})
assert.Equal(t, 0, x)
}
// Ensure that the database can be copied to a file path.
func TestTx_CopyFile(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
var dest = tempfile()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
@ -310,7 +313,6 @@ func TestTx_CopyFile(t *testing.T) {
assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
return nil
})
})
}
type failWriterError struct{}
@ -336,7 +338,8 @@ func (f *failWriter) Write(p []byte) (n int, err error) {
// Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Meta(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
@ -346,12 +349,12 @@ func TestTx_CopyFile_Error_Meta(t *testing.T) {
err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{}) })
assert.EqualError(t, err, "meta copy: error injected for tests")
})
}
// Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Normal(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db := NewTestDB()
defer db.Close()
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
@ -361,7 +364,6 @@ func TestTx_CopyFile_Error_Normal(t *testing.T) {
err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{3 * db.pageSize}) })
assert.EqualError(t, err, "error injected for tests")
})
}
func ExampleTx_Rollback() {