Remove wrapping test closures.

pull/34/head
Ben Johnson 2014-07-26 14:44:04 -06:00
parent 06222e06de
commit ca2339d7cb
5 changed files with 1496 additions and 1461 deletions

File diff suppressed because it is too large.
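The pattern applied throughout the diffs below: instead of passing the test body to a withOpenDB closure, each test now constructs a TestDB and defers its Close. A minimal before/after sketch using the helpers shown in this commit (the test names and bucket are illustrative, not part of the change):

	// Before: the test body runs inside a closure passed to a helper.
	func TestExample_Old(t *testing.T) {
		withOpenDB(func(db *DB, path string) {
			db.Update(func(tx *Tx) error {
				_, err := tx.CreateBucket([]byte("widgets"))
				return err
			})
		})
	}

	// After: the test owns the database and cleans up with defer.
	func TestExample_New(t *testing.T) {
		db := NewTestDB()
		defer db.Close()
		db.Update(func(tx *Tx) error {
			_, err := tx.CreateBucket([]byte("widgets"))
			return err
		})
	}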


@@ -12,98 +12,99 @@ import (
// Ensure that a cursor can return a reference to the bucket that created it.
func TestCursor_Bucket(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		b, _ := tx.CreateBucket([]byte("widgets"))
		c := b.Cursor()
		assert.Equal(t, b, c.Bucket())
		return nil
	})
}

// Ensure that a Tx cursor can seek to the appropriate keys.
func TestCursor_Seek(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		assert.NoError(t, err)
		assert.NoError(t, b.Put([]byte("foo"), []byte("0001")))
		assert.NoError(t, b.Put([]byte("bar"), []byte("0002")))
		assert.NoError(t, b.Put([]byte("baz"), []byte("0003")))
		_, err = b.CreateBucket([]byte("bkt"))
		assert.NoError(t, err)
		return nil
	})
	db.View(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()

		// Exact match should go to the key.
		k, v := c.Seek([]byte("bar"))
		assert.Equal(t, []byte("bar"), k)
		assert.Equal(t, []byte("0002"), v)

		// Inexact match should go to the next key.
		k, v = c.Seek([]byte("bas"))
		assert.Equal(t, []byte("baz"), k)
		assert.Equal(t, []byte("0003"), v)

		// Low key should go to the first key.
		k, v = c.Seek([]byte(""))
		assert.Equal(t, []byte("bar"), k)
		assert.Equal(t, []byte("0002"), v)

		// High key should return no key.
		k, v = c.Seek([]byte("zzz"))
		assert.Nil(t, k)
		assert.Nil(t, v)

		// Buckets should return their key but no value.
		k, v = c.Seek([]byte("bkt"))
		assert.Equal(t, []byte("bkt"), k)
		assert.Nil(t, v)

		return nil
	})
}

func TestCursor_Delete(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	var count = 1000

	// Insert every other key between 0 and $count.
	db.Update(func(tx *Tx) error {
		b, _ := tx.CreateBucket([]byte("widgets"))
		for i := 0; i < count; i += 1 {
			k := make([]byte, 8)
			binary.BigEndian.PutUint64(k, uint64(i))
			b.Put(k, make([]byte, 100))
		}
		b.CreateBucket([]byte("sub"))
		return nil
	})

	db.Update(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		bound := make([]byte, 8)
		binary.BigEndian.PutUint64(bound, uint64(count/2))
		for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
			if err := c.Delete(); err != nil {
				return err
			}
		}
		c.Seek([]byte("sub"))
		err := c.Delete()
		assert.Equal(t, err, ErrIncompatibleValue)
		return nil
	})

	db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		assert.Equal(t, b.Stats().KeyN, count/2+1)
		return nil
	})
}
@@ -113,216 +114,223 @@ func TestCursor_Delete(t *testing.T) {
//
// Related: https://github.com/boltdb/bolt/pull/187
func TestCursor_Seek_Large(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	var count = 10000

	// Insert every other key between 0 and $count.
	db.Update(func(tx *Tx) error {
		b, _ := tx.CreateBucket([]byte("widgets"))
		for i := 0; i < count; i += 100 {
			for j := i; j < i+100; j += 2 {
				k := make([]byte, 8)
				binary.BigEndian.PutUint64(k, uint64(j))
				b.Put(k, make([]byte, 100))
			}
		}
		return nil
	})
	db.View(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		for i := 0; i < count; i++ {
			seek := make([]byte, 8)
			binary.BigEndian.PutUint64(seek, uint64(i))

			k, _ := c.Seek(seek)

			// The last seek is beyond the end of the the range so
			// it should return nil.
			if i == count-1 {
				assert.Nil(t, k)
				continue
			}

			// Otherwise we should seek to the exact key or the next key.
			num := binary.BigEndian.Uint64(k)
			if i%2 == 0 {
				assert.Equal(t, uint64(i), num)
			} else {
				assert.Equal(t, uint64(i+1), num)
			}
		}

		return nil
	})
}

// Ensure that a cursor can iterate over an empty bucket without error.
func TestCursor_EmptyBucket(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	})
	db.View(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		k, v := c.First()
		assert.Nil(t, k)
		assert.Nil(t, v)
		return nil
	})
}

// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
func TestCursor_EmptyBucketReverse(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	})
	db.View(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		k, v := c.Last()
		assert.Nil(t, k)
		assert.Nil(t, v)
		return nil
	})
}

// Ensure that a Tx cursor can iterate over a single root with a couple elements.
func TestCursor_Iterate_Leaf(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
		tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
		return nil
	})
	tx, _ := db.Begin(false)
	c := tx.Bucket([]byte("widgets")).Cursor()

	k, v := c.First()
	assert.Equal(t, string(k), "bar")
	assert.Equal(t, v, []byte{1})

	k, v = c.Next()
	assert.Equal(t, string(k), "baz")
	assert.Equal(t, v, []byte{})

	k, v = c.Next()
	assert.Equal(t, string(k), "foo")
	assert.Equal(t, v, []byte{0})

	k, v = c.Next()
	assert.Nil(t, k)
	assert.Nil(t, v)

	k, v = c.Next()
	assert.Nil(t, k)
	assert.Nil(t, v)

	tx.Rollback()
}

// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
func TestCursor_LeafRootReverse(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
		tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
		return nil
	})
	tx, _ := db.Begin(false)
	c := tx.Bucket([]byte("widgets")).Cursor()

	k, v := c.Last()
	assert.Equal(t, string(k), "foo")
	assert.Equal(t, v, []byte{0})

	k, v = c.Prev()
	assert.Equal(t, string(k), "baz")
	assert.Equal(t, v, []byte{})

	k, v = c.Prev()
	assert.Equal(t, string(k), "bar")
	assert.Equal(t, v, []byte{1})

	k, v = c.Prev()
	assert.Nil(t, k)
	assert.Nil(t, v)

	k, v = c.Prev()
	assert.Nil(t, k)
	assert.Nil(t, v)

	tx.Rollback()
}

// Ensure that a Tx cursor can restart from the beginning.
func TestCursor_Restart(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
		return nil
	})
	tx, _ := db.Begin(false)
	c := tx.Bucket([]byte("widgets")).Cursor()

	k, _ := c.First()
	assert.Equal(t, string(k), "bar")

	k, _ = c.Next()
	assert.Equal(t, string(k), "foo")

	k, _ = c.First()
	assert.Equal(t, string(k), "bar")

	k, _ = c.Next()
	assert.Equal(t, string(k), "foo")

	tx.Rollback()
}

// Ensure that a Tx can iterate over all elements in a bucket.
func TestCursor_QuickCheck(t *testing.T) {
	f := func(items testdata) bool {
		db := NewTestDB()
		defer db.Close()

		// Bulk insert all values.
		tx, _ := db.Begin(true)
		tx.CreateBucket([]byte("widgets"))
		b := tx.Bucket([]byte("widgets"))
		for _, item := range items {
			assert.NoError(t, b.Put(item.Key, item.Value))
		}
		assert.NoError(t, tx.Commit())

		// Sort test data.
		sort.Sort(items)

		// Iterate over all items and check consistency.
		var index = 0
		tx, _ = db.Begin(false)
		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
			assert.Equal(t, k, items[index].Key)
			assert.Equal(t, v, items[index].Value)
			index++
		}
		assert.Equal(t, len(items), index)
		tx.Rollback()

		return true
	}
	if err := quick.Check(f, qconfig()); err != nil {
@@ -333,31 +341,33 @@ func TestCursor_QuickCheck(t *testing.T) {
// Ensure that a transaction can iterate over all elements in a bucket in reverse.
func TestCursor_QuickCheck_Reverse(t *testing.T) {
	f := func(items testdata) bool {
		db := NewTestDB()
		defer db.Close()

		// Bulk insert all values.
		tx, _ := db.Begin(true)
		tx.CreateBucket([]byte("widgets"))
		b := tx.Bucket([]byte("widgets"))
		for _, item := range items {
			assert.NoError(t, b.Put(item.Key, item.Value))
		}
		assert.NoError(t, tx.Commit())

		// Sort test data.
		sort.Sort(revtestdata(items))

		// Iterate over all items and check consistency.
		var index = 0
		tx, _ = db.Begin(false)
		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
			assert.Equal(t, k, items[index].Key)
			assert.Equal(t, v, items[index].Value)
			index++
		}
		assert.Equal(t, len(items), index)
		tx.Rollback()

		return true
	}
	if err := quick.Check(f, qconfig()); err != nil {
@@ -367,54 +377,56 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
// Ensure that a Tx cursor can iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		assert.NoError(t, err)
		_, err = b.CreateBucket([]byte("foo"))
		assert.NoError(t, err)
		_, err = b.CreateBucket([]byte("bar"))
		assert.NoError(t, err)
		_, err = b.CreateBucket([]byte("baz"))
		assert.NoError(t, err)
		return nil
	})
	db.View(func(tx *Tx) error {
		var names []string
		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			names = append(names, string(k))
			assert.Nil(t, v)
		}
		assert.Equal(t, names, []string{"bar", "baz", "foo"})
		return nil
	})
}

// Ensure that a Tx cursor can reverse iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		assert.NoError(t, err)
		_, err = b.CreateBucket([]byte("foo"))
		assert.NoError(t, err)
		_, err = b.CreateBucket([]byte("bar"))
		assert.NoError(t, err)
		_, err = b.CreateBucket([]byte("baz"))
		assert.NoError(t, err)
		return nil
	})
	db.View(func(tx *Tx) error {
		var names []string
		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			names = append(names, string(k))
			assert.Nil(t, v)
		}
		assert.Equal(t, names, []string{"foo", "baz", "bar"})
		return nil
	})
}


@@ -206,14 +206,14 @@ func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
// Ensure that a read-write transaction can be retrieved.
func TestDB_BeginRW(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	tx, err := db.Begin(true)
	assert.NotNil(t, tx)
	assert.NoError(t, err)
	assert.Equal(t, tx.DB(), db)
	assert.Equal(t, tx.Writable(), true)
	assert.NoError(t, tx.Commit())
}

// Ensure that opening a transaction while the DB is closed returns an error.
@@ -226,23 +226,23 @@ func TestDB_BeginRW_Closed(t *testing.T) {
// Ensure a database can provide a transactional block.
func TestDB_Update(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	err := db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		b := tx.Bucket([]byte("widgets"))
		b.Put([]byte("foo"), []byte("bar"))
		b.Put([]byte("baz"), []byte("bat"))
		b.Delete([]byte("foo"))
		return nil
	})
	assert.NoError(t, err)
	err = db.View(func(tx *Tx) error {
		assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
		assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
		return nil
	})
	assert.NoError(t, err)
}

// Ensure a closed database returns an error while running a transaction block
@@ -273,69 +273,70 @@ func TestDB_Update_ManualCommitAndRollback(t *testing.T) {
// Ensure a write transaction that panics does not hold open locks.
func TestDB_Update_Panic(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	func() {
		defer func() {
			if r := recover(); r != nil {
				warn("recover: update", r)
			}
		}()
		db.Update(func(tx *Tx) error {
			tx.CreateBucket([]byte("widgets"))
			panic("omg")
		})
	}()

	// Verify we can update again.
	err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	})
	assert.NoError(t, err)

	// Verify that our change persisted.
	err = db.Update(func(tx *Tx) error {
		assert.NotNil(t, tx.Bucket([]byte("widgets")))
		return nil
	})
}

// Ensure a database can return an error through a read-only transactional block.
func TestDB_View_Error(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	err := db.View(func(tx *Tx) error {
		return errors.New("xxx")
	})
	assert.Equal(t, errors.New("xxx"), err)
}

// Ensure a read transaction that panics does not hold open locks.
func TestDB_View_Panic(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		return nil
	})

	func() {
		defer func() {
			if r := recover(); r != nil {
				warn("recover: view", r)
			}
		}()
		db.View(func(tx *Tx) error {
			assert.NotNil(t, tx.Bucket([]byte("widgets")))
			panic("omg")
		})
	}()

	// Verify that we can still use read transactions.
	db.View(func(tx *Tx) error {
		assert.NotNil(t, tx.Bucket([]byte("widgets")))
		return nil
	})
}
@@ -346,16 +347,16 @@ func TestDB_Commit_WriteFail(t *testing.T) {
// Ensure that DB stats can be returned.
func TestDB_Stats(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	})
	stats := db.Stats()
	assert.Equal(t, 2, stats.TxStats.PageCount, "PageCount")
	assert.Equal(t, 0, stats.FreePageN, "FreePageN")
	assert.Equal(t, 2, stats.PendingPageN, "PendingPageN")
}

// Ensure that the mmap grows appropriately.
@@ -373,41 +374,41 @@ func TestDB_mmapSize(t *testing.T) {
// Ensure that database pages are in expected order and type.
func TestDB_Consistency(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	})

	for i := 0; i < 10; i++ {
		db.Update(func(tx *Tx) error {
			assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
			return nil
		})
	}
	db.Update(func(tx *Tx) error {
		if p, _ := tx.Page(0); assert.NotNil(t, p) {
			assert.Equal(t, "meta", p.Type)
		}
		if p, _ := tx.Page(1); assert.NotNil(t, p) {
			assert.Equal(t, "meta", p.Type)
		}
		if p, _ := tx.Page(2); assert.NotNil(t, p) {
			assert.Equal(t, "free", p.Type)
		}
		if p, _ := tx.Page(3); assert.NotNil(t, p) {
			assert.Equal(t, "free", p.Type)
		}
		if p, _ := tx.Page(4); assert.NotNil(t, p) {
			assert.Equal(t, "leaf", p.Type) // root leaf
		}
		if p, _ := tx.Page(5); assert.NotNil(t, p) {
			assert.Equal(t, "freelist", p.Type)
		}
		p, _ := tx.Page(6)
		assert.Nil(t, p)
		return nil
	})
}
@@ -451,16 +452,17 @@ func TestDB_StrictMode(t *testing.T) {
			msg = fmt.Sprintf("%s", recover())
		}()

		db := NewTestDB()
		defer db.Close()

		db.StrictMode = true
		db.Update(func(tx *Tx) error {
			tx.CreateBucket([]byte("foo"))

			// Corrupt the DB by extending the high water mark.
			tx.meta.pgid++

			return nil
		})
	}()
@@ -474,15 +476,18 @@ func TestDB_DoubleFree(t *testing.T) {
		defer func() {
			msg = fmt.Sprintf("%s", recover())
		}()

		db := NewTestDB()
		defer os.Remove(db.DB.Path())
		defer db.DB.Close()
		db.Update(func(tx *Tx) error {
			tx.CreateBucket([]byte("foo"))

			// Corrupt the DB by adding a page to the freelist.
			db.freelist.free(0, tx.page(3))

			return nil
		})
	}()
@@ -580,37 +585,53 @@ func ExampleDB_Begin_ReadOnly() {
	// zephyr likes purple
}

// TestDB represents a wrapper around a Bolt DB to handle temporary file
// creation and automatic cleanup on close.
type TestDB struct {
	*DB
}

// NewTestDB returns a new instance of TestDB.
func NewTestDB() *TestDB {
	db, err := Open(tempfile(), 0666, nil)
	if err != nil {
		panic("cannot open db: " + err.Error())
	}
	return &TestDB{db}
}

// Close closes the database and deletes the underlying file.
func (db *TestDB) Close() {
	// Log statistics.
	if *statsFlag {
		db.PrintStats()
	}

	// Check database consistency after every test.
	db.MustCheck()

	// Close database and remove file.
	defer os.Remove(db.Path())
	db.DB.Close()
}

// PrintStats prints the database stats
func (db *TestDB) PrintStats() {
	var stats = db.Stats()
	fmt.Printf("[db] %-20s %-20s %-20s\n",
		fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
		fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
		fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
	)
	fmt.Printf("     %-20s %-20s %-20s\n",
		fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
		fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
		fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
	)
}

// MustCheck runs a consistency check on the database and panics if any errors are found.
func (db *TestDB) MustCheck() {
	db.View(func(tx *Tx) error {
		// Collect all the errors.
		var errors []error
@@ -643,6 +664,21 @@ func mustCheck(db *DB) {
	})
}

// CopyTempFile copies a database to a temporary file.
func (db *TestDB) CopyTempFile() {
	path := tempfile()
	db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) })
	fmt.Println("db copied to: ", path)
}

// tempfile returns a temporary file path.
func tempfile() string {
	f, _ := ioutil.TempFile("", "bolt-")
	f.Close()
	os.Remove(f.Name())
	return f.Name()
}

// mustContainKeys checks that a bucket contains a given set of keys.
func mustContainKeys(b *Bucket, m map[string]string) {
	found := make(map[string]string)
@@ -682,29 +718,6 @@ func trunc(b []byte, length int) []byte {
	return b
}

func truncDuration(d time.Duration) string {
	return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
}


@@ -41,78 +41,80 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
	var versions = make(map[txid]*QuickDB)
	versions[1] = NewQuickDB()

	db := NewTestDB()
	defer db.Close()

	var mutex sync.Mutex

	// Run n threads in parallel, each with their own operation.
	var wg sync.WaitGroup
	var threads = make(chan bool, parallelism)
	var i int
	for {
		threads <- true
		wg.Add(1)
		writable := ((rand.Int() % 100) < 20) // 20% writers

		// Choose an operation to execute.
		var handler simulateHandler
		if writable {
			handler = writerHandlers[rand.Intn(len(writerHandlers))]
		} else {
			handler = readerHandlers[rand.Intn(len(readerHandlers))]
		}

		// Execute a thread for the given operation.
		go func(writable bool, handler simulateHandler) {
			defer wg.Done()

			// Start transaction.
			tx, err := db.Begin(writable)
			if err != nil {
				t.Fatal("tx begin: ", err)
			}

			// Obtain current state of the dataset.
			mutex.Lock()
			var qdb = versions[tx.id()]
			if writable {
				qdb = versions[tx.id()-1].Copy()
			}
			mutex.Unlock()

			// Make sure we commit/rollback the tx at the end and update the state.
			if writable {
				defer func() {
					mutex.Lock()
					versions[tx.id()] = qdb
					mutex.Unlock()

					assert.NoError(t, tx.Commit())
				}()
			} else {
				defer tx.Rollback()
			}

			// Ignore operation if we don't have data yet.
			if qdb == nil {
				return
			}

			// Execute handler.
			handler(tx, qdb)

			// Release a thread back to the scheduling loop.
			<-threads
		}(writable, handler)

		i++
		if i > threadCount {
			break
		}
	}

	// Wait until all threads are done.
	wg.Wait()
}

type simulateHandler func(tx *Tx, qdb *QuickDB)
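The scheduling in testSimulate above is the usual Go pattern of a buffered channel acting as a counting semaphore (capacity parallelism) combined with a WaitGroup to join the workers. A stripped-down sketch of just that skeleton, with doWork as a hypothetical stand-in for the randomly chosen reader/writer handler:

	var wg sync.WaitGroup
	threads := make(chan bool, parallelism) // at most `parallelism` goroutines in flight
	for i := 0; i < threadCount; i++ {
		threads <- true // blocks while the semaphore is full
		wg.Add(1)
		go func() {
			defer wg.Done()
			doWork()  // hypothetical stand-in for handler(tx, qdb)
			<-threads // release the slot
		}()
	}
	wg.Wait()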


@@ -11,265 +11,267 @@ import (
// Ensure that committing a closed transaction returns an error.
func TestTx_Commit_Closed(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	tx, _ := db.Begin(true)
	tx.CreateBucket([]byte("foo"))
	assert.NoError(t, tx.Commit())
	assert.Equal(t, tx.Commit(), ErrTxClosed)
}

// Ensure that rolling back a closed transaction returns an error.
func TestTx_Rollback_Closed(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	tx, _ := db.Begin(true)
	assert.NoError(t, tx.Rollback())
	assert.Equal(t, tx.Rollback(), ErrTxClosed)
}

// Ensure that committing a read-only transaction returns an error.
func TestTx_Commit_ReadOnly(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	tx, _ := db.Begin(false)
	assert.Equal(t, tx.Commit(), ErrTxNotWritable)
}

// Ensure that a transaction can retrieve a cursor on the root bucket.
func TestTx_Cursor(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		tx.CreateBucket([]byte("woojits"))
		c := tx.Cursor()

		k, v := c.First()
		assert.Equal(t, "widgets", string(k))
		assert.Nil(t, v)

		k, v = c.Next()
		assert.Equal(t, "woojits", string(k))
		assert.Nil(t, v)

		k, v = c.Next()
		assert.Nil(t, k)
		assert.Nil(t, v)

		return nil
	})
}

// Ensure that creating a bucket with a read-only transaction returns an error.
func TestTx_CreateBucket_ReadOnly(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.View(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("foo"))
		assert.Nil(t, b)
		assert.Equal(t, ErrTxNotWritable, err)
		return nil
	})
}

// Ensure that creating a bucket on a closed transaction returns an error.
func TestTx_CreateBucket_Closed(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	tx, _ := db.Begin(true)
	tx.Commit()
	b, err := tx.CreateBucket([]byte("foo"))
	assert.Nil(t, b)
	assert.Equal(t, ErrTxClosed, err)
}

// Ensure that a Tx can retrieve a bucket.
func TestTx_Bucket(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		b := tx.Bucket([]byte("widgets"))
		assert.NotNil(t, b)
		return nil
	})
}

// Ensure that a Tx retrieving a non-existent key returns nil.
func TestTx_Get_Missing(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
		value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
		assert.Nil(t, value)
		return nil
	})
}

// Ensure that a bucket can be created and retrieved.
func TestTx_CreateBucket(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	// Create a bucket.
	db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		assert.NotNil(t, b)
		assert.NoError(t, err)
		return nil
	})

	// Read the bucket through a separate transaction.
	db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		assert.NotNil(t, b)
		return nil
	})
}

// Ensure that a bucket can be created if it doesn't already exist.
func TestTx_CreateBucketIfNotExists(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		assert.NotNil(t, b)
		assert.NoError(t, err)

		b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
		assert.NotNil(t, b)
		assert.NoError(t, err)

		b, err = tx.CreateBucketIfNotExists([]byte{})
		assert.Nil(t, b)
		assert.Equal(t, ErrBucketNameRequired, err)

		b, err = tx.CreateBucketIfNotExists(nil)
		assert.Nil(t, b)
		assert.Equal(t, ErrBucketNameRequired, err)
		return nil
	})

	// Read the bucket through a separate transaction.
	db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		assert.NotNil(t, b)
		return nil
	})
}

// Ensure that a bucket cannot be created twice.
func TestTx_CreateBucket_Exists(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	// Create a bucket.
	db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		assert.NotNil(t, b)
		assert.NoError(t, err)
		return nil
	})

	// Create the same bucket again.
	db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		assert.Nil(t, b)
		assert.Equal(t, ErrBucketExists, err)
		return nil
	})
}

// Ensure that a bucket is created with a non-blank name.
func TestTx_CreateBucket_NameRequired(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket(nil)
		assert.Nil(t, b)
		assert.Equal(t, ErrBucketNameRequired, err)
		return nil
	})
}

// Ensure that a bucket can be deleted.
func TestTx_DeleteBucket(t *testing.T) {
	db := NewTestDB()
	defer db.Close()

	// Create a bucket and add a value.
	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
		return nil
	})

	// Save root page id.
	var root pgid
	db.View(func(tx *Tx) error {
		root = tx.Bucket([]byte("widgets")).root
		return nil
	})

	// Delete the bucket and make sure we can't get the value.
	db.Update(func(tx *Tx) error {
		assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
		assert.Nil(t, tx.Bucket([]byte("widgets")))
		return nil
	})

	db.Update(func(tx *Tx) error {
		// Verify that the bucket's page is free.
		assert.Equal(t, []pgid{4, 5}, db.freelist.all())

		// Create the bucket again and make sure there's not a phantom value.
		b, err := tx.CreateBucket([]byte("widgets"))
		assert.NotNil(t, b)
		assert.NoError(t, err)
		assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
		return nil
	})
}

// Ensure that deleting a bucket on a closed transaction returns an error.
func TestTx_DeleteBucket_Closed(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	tx, _ := db.Begin(true)
	tx.Commit()
	assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxClosed)
}

// Ensure that deleting a bucket with a read-only transaction returns an error.
func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.View(func(tx *Tx) error {
		assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxNotWritable)
		return nil
	})
}

// Ensure that nothing happens when deleting a bucket that doesn't exist.
func TestTx_DeleteBucket_NotFound(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
		return nil
	})
}

// Ensure that Tx commit handlers are called after a transaction successfully commits.
func TestTx_OnCommit(t *testing.T) {
	var x int
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		tx.OnCommit(func() { x += 1 })
		tx.OnCommit(func() { x += 2 })
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	})
	assert.Equal(t, 3, x)
}
@@ -277,39 +279,39 @@ func TestTx_OnCommit(t *testing.T) {
// Ensure that Tx commit handlers are NOT called after a transaction rolls back.
func TestTx_OnCommit_Rollback(t *testing.T) {
	var x int
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		tx.OnCommit(func() { x += 1 })
		tx.OnCommit(func() { x += 2 })
		tx.CreateBucket([]byte("widgets"))
		return errors.New("rollback this commit")
	})
	assert.Equal(t, 0, x)
}

// Ensure that the database can be copied to a file path.
func TestTx_CopyFile(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	var dest = tempfile()
	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
		return nil
	})

	assert.NoError(t, db.View(func(tx *Tx) error { return tx.CopyFile(dest, 0600) }))

	db2, err := Open(dest, 0600, nil)
	assert.NoError(t, err)
	defer db2.Close()

	db2.View(func(tx *Tx) error {
		assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
		assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
		return nil
	})
}
@@ -336,32 +338,32 @@ func (f *failWriter) Write(p []byte) (n int, err error) {
// Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Meta(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
		return nil
	})
	err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{}) })
	assert.EqualError(t, err, "meta copy: error injected for tests")
}

// Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Normal(t *testing.T) {
	db := NewTestDB()
	defer db.Close()
	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
		return nil
	})
	err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{3 * db.pageSize}) })
	assert.EqualError(t, err, "error injected for tests")
}

func ExampleTx_Rollback() {