diff --git a/bolt_test.go b/bolt_test.go new file mode 100644 index 0000000..b7bea1f --- /dev/null +++ b/bolt_test.go @@ -0,0 +1,36 @@ +package bolt_test + +import ( + "fmt" + "path/filepath" + "reflect" + "runtime" + "testing" +) + +// assert fails the test if the condition is false. +func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { + if !condition { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) + tb.FailNow() + } +} + +// ok fails the test if an err is not nil. +func ok(tb testing.TB, err error) { + if err != nil { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) + tb.FailNow() + } +} + +// equals fails the test if exp is not equal to act. +func equals(tb testing.TB, exp, act interface{}) { + if !reflect.DeepEqual(exp, act) { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + tb.FailNow() + } +} diff --git a/bucket.go b/bucket.go index 1ef0d5e..2b0e340 100644 --- a/bucket.go +++ b/bucket.go @@ -634,7 +634,7 @@ func (b *Bucket) free() { var tx = b.tx b.forEachPageNode(func(p *page, n *node, _ int) { if p != nil { - tx.db.freelist.free(tx.id(), p) + tx.db.freelist.free(tx.meta.txid, p) } else { n.free() } diff --git a/bucket_test.go b/bucket_test.go index 029ff2b..90e704a 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -1,4 +1,4 @@ -package bolt +package bolt_test import ( "bytes" @@ -12,98 +12,98 @@ import ( "testing" "testing/quick" - "github.com/stretchr/testify/assert" + "github.com/boltdb/bolt" ) // Ensure that a bucket that gets a non-existent key returns nil. 
func TestBucket_Get_NonExistent(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert.Nil(t, value) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + assert(t, value == nil, "") + return nil }) } // Ensure that a bucket can read a value that is not flushed yet. func TestBucket_Get_FromNode(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("bar")) - value := b.Get([]byte("foo")) - assert.Equal(t, value, []byte("bar")) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + b.Put([]byte("foo"), []byte("bar")) + value := b.Get([]byte("foo")) + equals(t, []byte("bar"), value) + return nil }) } // Ensure that a bucket retrieved via Get() returns a nil. func TestBucket_Get_IncompatibleValue(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.NoError(t, err) - assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo"))) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + ok(t, err) + assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") + return nil }) } // Ensure that a bucket can write a key/value. 
func TestBucket_Put(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - assert.NoError(t, err) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert.Equal(t, value, []byte("bar")) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + ok(t, err) + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + equals(t, value, []byte("bar")) + return nil }) } // Ensure that a bucket can rewrite a key in the same transaction. func TestBucket_Put_Repeat(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - assert.NoError(t, b.Put([]byte("foo"), []byte("bar"))) - assert.NoError(t, b.Put([]byte("foo"), []byte("baz"))) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert.Equal(t, value, []byte("baz")) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + ok(t, b.Put([]byte("foo"), []byte("bar"))) + ok(t, b.Put([]byte("foo"), []byte("baz"))) + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + equals(t, value, []byte("baz")) + return nil }) } // Ensure that a bucket can write a bunch of large values. 
func TestBucket_Put_Large(t *testing.T) { - var count = 100 - var factor = 200 - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for i := 1; i < count; i++ { - assert.NoError(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor)))) - } - return nil - }) - db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i < count; i++ { - value := b.Get([]byte(strings.Repeat("0", i*factor))) - assert.Equal(t, []byte(strings.Repeat("X", (count-i)*factor)), value) - } - return nil - }) + db := NewTestDB() + defer db.Close() + + count, factor := 100, 200 + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + for i := 1; i < count; i++ { + ok(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor)))) + } + return nil + }) + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i < count; i++ { + value := b.Get([]byte(strings.Repeat("0", i*factor))) + equals(t, []byte(strings.Repeat("X", (count-i)*factor)), value) + } + return nil }) } @@ -116,102 +116,103 @@ func TestDB_Put_VeryLarge(t *testing.T) { n, batchN := 400000, 200000 ksize, vsize := 8, 500 - withOpenDB(func(db *DB, path string) { - for i := 0; i < n; i += batchN { - err := db.Update(func(tx *Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) - for j := 0; j < batchN; j++ { - k, v := make([]byte, ksize), make([]byte, vsize) - binary.BigEndian.PutUint32(k, uint32(i+j)) - assert.NoError(t, b.Put(k, v)) - } - return nil - }) - assert.NoError(t, err) - } - }) + db := NewTestDB() + defer db.Close() + + for i := 0; i < n; i += batchN { + err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) + for j := 0; j < batchN; j++ { + k, v := make([]byte, ksize), make([]byte, vsize) + 
binary.BigEndian.PutUint32(k, uint32(i+j)) + ok(t, b.Put(k, v)) + } + return nil + }) + ok(t, err) + } } // Ensure that a setting a value on a key with a bucket value returns an error. func TestBucket_Put_IncompatibleValue(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.NoError(t, err) - assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + ok(t, err) + equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + return nil }) } // Ensure that a setting a value while the transaction is closed returns an error. func TestBucket_Put_Closed(t *testing.T) { - withOpenDB(func(db *DB, path string) { - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - assert.Equal(t, ErrTxClosed, b.Put([]byte("foo"), []byte("bar"))) - }) + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + tx.Rollback() + equals(t, bolt.ErrTxClosed, b.Put([]byte("foo"), []byte("bar"))) } // Ensure that setting a value on a read-only bucket returns an error. 
func TestBucket_Put_ReadOnly(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) - return nil - }) - db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - err := b.Put([]byte("foo"), []byte("bar")) - assert.Equal(t, err, ErrTxNotWritable) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + return nil + }) + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + err := b.Put([]byte("foo"), []byte("bar")) + equals(t, err, bolt.ErrTxNotWritable) + return nil }) } // Ensure that a bucket can delete an existing key. func TestBucket_Delete(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")) - assert.NoError(t, err) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert.Nil(t, value) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")) + ok(t, err) + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + assert(t, value == nil, "") + return nil }) } // Ensure that deleting a large set of keys will work correctly. 
func TestBucket_Delete_Large(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - var b, _ = tx.CreateBucket([]byte("widgets")) - for i := 0; i < 100; i++ { - assert.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) - } - return nil - }) - db.Update(func(tx *Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - assert.NoError(t, b.Delete([]byte(strconv.Itoa(i)))) - } - return nil - }) - db.View(func(tx *Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - assert.Nil(t, b.Get([]byte(strconv.Itoa(i)))) - } - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + var b, _ = tx.CreateBucket([]byte("widgets")) + for i := 0; i < 100; i++ { + ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) + } + return nil + }) + db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + for i := 0; i < 100; i++ { + ok(t, b.Delete([]byte(strconv.Itoa(i)))) + } + return nil + }) + db.View(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + for i := 0; i < 100; i++ { + assert(t, b.Get([]byte(strconv.Itoa(i))) == nil, "") + } + return nil }) } @@ -221,446 +222,447 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) { t.Skip("skipping test in short mode.") } - withOpenDB(func(db *DB, path string) { - k := make([]byte, 16) - for i := uint64(0); i < 10000; i++ { - err := db.Update(func(tx *Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("0")) - if err != nil { - t.Fatalf("bucket error: %s", err) - } - - for j := uint64(0); j < 1000; j++ { - binary.BigEndian.PutUint64(k[:8], i) - binary.BigEndian.PutUint64(k[8:], j) - if err := b.Put(k, nil); err != nil { - t.Fatalf("put error: %s", err) - } - } - - return nil - }) - + db := NewTestDB() + defer db.Close() + k := make([]byte, 16) + for i := uint64(0); i < 10000; i++ { + err := db.Update(func(tx *bolt.Tx) error { + 
b, err := tx.CreateBucketIfNotExists([]byte("0")) if err != nil { - t.Fatalf("update error: %s", err) + t.Fatalf("bucket error: %s", err) } - } - // Delete all of them in one large transaction - err := db.Update(func(tx *Tx) error { - b := tx.Bucket([]byte("0")) - c := b.Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - b.Delete(k) + for j := uint64(0); j < 1000; j++ { + binary.BigEndian.PutUint64(k[:8], i) + binary.BigEndian.PutUint64(k[8:], j) + if err := b.Put(k, nil); err != nil { + t.Fatalf("put error: %s", err) + } } + return nil }) - // Check that a freelist overflow occurred. - assert.NoError(t, err) + if err != nil { + t.Fatalf("update error: %s", err) + } + } + + // Delete all of them in one large transaction + err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("0")) + c := b.Cursor() + for k, _ := c.First(); k != nil; k, _ = c.Next() { + b.Delete(k) + } + return nil }) + + // Check that a freelist overflow occurred. + ok(t, err) } // Ensure that accessing and updating nested buckets is ok across transactions. func TestBucket_Nested(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - // Create a widgets bucket. - b, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + // Create a widgets bucket. + b, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) - // Create a widgets/foo bucket. - _, err = b.CreateBucket([]byte("foo")) - assert.NoError(t, err) + // Create a widgets/foo bucket. + _, err = b.CreateBucket([]byte("foo")) + ok(t, err) - // Create a widgets/bar key. - assert.NoError(t, b.Put([]byte("bar"), []byte("0000"))) + // Create a widgets/bar key. + ok(t, b.Put([]byte("bar"), []byte("0000"))) - return nil - }) - mustCheck(db) + return nil + }) + db.MustCheck() - // Update widgets/bar. 
- db.Update(func(tx *Tx) error { - var b = tx.Bucket([]byte("widgets")) - assert.NoError(t, b.Put([]byte("bar"), []byte("xxxx"))) - return nil - }) - mustCheck(db) + // Update widgets/bar. + db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + ok(t, b.Put([]byte("bar"), []byte("xxxx"))) + return nil + }) + db.MustCheck() - // Cause a split. - db.Update(func(tx *Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 10000; i++ { - assert.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) - } - return nil - }) - mustCheck(db) + // Cause a split. + db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + for i := 0; i < 10000; i++ { + ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) + } + return nil + }) + db.MustCheck() - // Insert into widgets/foo/baz. - db.Update(func(tx *Tx) error { - var b = tx.Bucket([]byte("widgets")) - assert.NoError(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) - return nil - }) - mustCheck(db) + // Insert into widgets/foo/baz. + db.Update(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + ok(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) + return nil + }) + db.MustCheck() - // Verify. - db.View(func(tx *Tx) error { - var b = tx.Bucket([]byte("widgets")) - assert.Equal(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz"))) - assert.Equal(t, []byte("xxxx"), b.Get([]byte("bar"))) - for i := 0; i < 10000; i++ { - assert.Equal(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i)))) - } - return nil - }) + // Verify. + db.View(func(tx *bolt.Tx) error { + var b = tx.Bucket([]byte("widgets")) + equals(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz"))) + equals(t, []byte("xxxx"), b.Get([]byte("bar"))) + for i := 0; i < 10000; i++ { + equals(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i)))) + } + return nil }) } // Ensure that deleting a bucket using Delete() returns an error. 
func TestBucket_Delete_Bucket(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - _, err := b.CreateBucket([]byte("foo")) - assert.NoError(t, err) - assert.Equal(t, ErrIncompatibleValue, b.Delete([]byte("foo"))) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + _, err := b.CreateBucket([]byte("foo")) + ok(t, err) + equals(t, bolt.ErrIncompatibleValue, b.Delete([]byte("foo"))) + return nil }) } // Ensure that deleting a key on a read-only bucket returns an error. func TestBucket_Delete_ReadOnly(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) - db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - err := b.Delete([]byte("foo")) - assert.Equal(t, err, ErrTxNotWritable) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + return nil + }) + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + err := b.Delete([]byte("foo")) + equals(t, err, bolt.ErrTxNotWritable) + return nil }) } // Ensure that a deleting value while the transaction is closed returns an error. func TestBucket_Delete_Closed(t *testing.T) { - withOpenDB(func(db *DB, path string) { - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - assert.Equal(t, ErrTxClosed, b.Delete([]byte("foo"))) - }) + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + tx.Rollback() + equals(t, bolt.ErrTxClosed, b.Delete([]byte("foo"))) } // Ensure that deleting a bucket causes nested buckets to be deleted. 
func TestBucket_DeleteBucket_Nested(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.NoError(t, err) - _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) - assert.NoError(t, err) - assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) - assert.NoError(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + ok(t, err) + _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) + ok(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) + return nil }) } // Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed. 
func TestBucket_DeleteBucket_Nested2(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.NoError(t, err) - _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) - assert.NoError(t, err) - assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) - return nil - }) - db.Update(func(tx *Tx) error { - assert.NotNil(t, tx.Bucket([]byte("widgets"))) - assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))) - assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar"))) - assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz"))) - assert.NoError(t, tx.DeleteBucket([]byte("widgets"))) - return nil - }) - db.View(func(tx *Tx) error { - assert.Nil(t, tx.Bucket([]byte("widgets"))) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + ok(t, err) + _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) + return nil + }) + db.Update(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")) != nil, "") + assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) != nil, "") + assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")) != nil, "") + equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz"))) + ok(t, tx.DeleteBucket([]byte("widgets"))) + return nil + }) + db.View(func(tx *bolt.Tx) error { + assert(t, 
tx.Bucket([]byte("widgets")) == nil, "") + return nil }) } // Ensure that deleting a child bucket with multiple pages causes all pages to get collected. func TestBucket_DeleteBucket_Large(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) - _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.NoError(t, err) - b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) - for i := 0; i < 1000; i++ { - assert.NoError(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)))) - } - return nil - }) - db.Update(func(tx *Tx) error { - assert.NoError(t, tx.DeleteBucket([]byte("widgets"))) - return nil - }) - - // NOTE: Consistency check in withOpenDB() will error if pages not freed properly. + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + ok(t, err) + b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) + for i := 0; i < 1000; i++ { + ok(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)))) + } + return nil }) + db.Update(func(tx *bolt.Tx) error { + ok(t, tx.DeleteBucket([]byte("widgets"))) + return nil + }) + + // NOTE: Consistency check in TestDB.Close() will panic if pages not freed properly. } // Ensure that a simple value retrieved via Bucket() returns a nil. 
func TestBucket_Bucket_IncompatibleValue(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - assert.Nil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) == nil, "") + return nil }) } // Ensure that creating a bucket on an existing non-bucket key returns an error. func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) - assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.Equal(t, ErrIncompatibleValue, err) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) + equals(t, bolt.ErrIncompatibleValue, err) + return nil }) } // Ensure that deleting a bucket on an existing non-bucket key returns an error. 
func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) - assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) + return nil }) } // Ensure that a bucket can return an autoincrementing sequence. func TestBucket_NextSequence(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.CreateBucket([]byte("woojits")) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.CreateBucket([]byte("woojits")) - // Make sure sequence increments. - seq, err := tx.Bucket([]byte("widgets")).NextSequence() - assert.NoError(t, err) - assert.Equal(t, seq, uint64(1)) - seq, err = tx.Bucket([]byte("widgets")).NextSequence() - assert.NoError(t, err) - assert.Equal(t, seq, uint64(2)) + // Make sure sequence increments. + seq, err := tx.Bucket([]byte("widgets")).NextSequence() + ok(t, err) + equals(t, seq, uint64(1)) + seq, err = tx.Bucket([]byte("widgets")).NextSequence() + ok(t, err) + equals(t, seq, uint64(2)) - // Buckets should be separate. - seq, err = tx.Bucket([]byte("woojits")).NextSequence() - assert.NoError(t, err) - assert.Equal(t, seq, uint64(1)) - return nil - }) + // Buckets should be separate. 
+ seq, err = tx.Bucket([]byte("woojits")).NextSequence() + ok(t, err) + equals(t, seq, uint64(1)) + return nil }) } // Ensure that retrieving the next sequence on a read-only bucket returns an error. func TestBucket_NextSequence_ReadOnly(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) - db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - i, err := b.NextSequence() - assert.Equal(t, i, uint64(0)) - assert.Equal(t, err, ErrTxNotWritable) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + return nil + }) + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + i, err := b.NextSequence() + equals(t, i, uint64(0)) + equals(t, err, bolt.ErrTxNotWritable) + return nil }) } // Ensure that retrieving the next sequence for a bucket on a closed database return an error. func TestBucket_NextSequence_Closed(t *testing.T) { - withOpenDB(func(db *DB, path string) { - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - _, err := b.NextSequence() - assert.Equal(t, ErrTxClosed, err) - }) + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + tx.Rollback() + _, err := b.NextSequence() + equals(t, bolt.ErrTxClosed, err) } // Ensure a user can loop over all key/value pairs in a bucket. 
func TestBucket_ForEach(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002")) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001")) + tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002")) - var index int - err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - switch index { - case 0: - assert.Equal(t, k, []byte("bar")) - assert.Equal(t, v, []byte("0002")) - case 1: - assert.Equal(t, k, []byte("baz")) - assert.Equal(t, v, []byte("0001")) - case 2: - assert.Equal(t, k, []byte("foo")) - assert.Equal(t, v, []byte("0000")) - } - index++ - return nil - }) - assert.NoError(t, err) - assert.Equal(t, index, 3) + var index int + err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + switch index { + case 0: + equals(t, k, []byte("bar")) + equals(t, v, []byte("0002")) + case 1: + equals(t, k, []byte("baz")) + equals(t, v, []byte("0001")) + case 2: + equals(t, k, []byte("foo")) + equals(t, v, []byte("0000")) + } + index++ return nil }) + ok(t, err) + equals(t, index, 3) + return nil }) } // Ensure a database can stop iteration early. 
func TestBucket_ForEach_ShortCircuit(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) - var index int - err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - index++ - if bytes.Equal(k, []byte("baz")) { - return errors.New("marker") - } - return nil - }) - assert.Equal(t, errors.New("marker"), err) - assert.Equal(t, 2, index) + var index int + err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + index++ + if bytes.Equal(k, []byte("baz")) { + return errors.New("marker") + } return nil }) + equals(t, errors.New("marker"), err) + equals(t, 2, index) + return nil }) } // Ensure that looping over a bucket on a closed database returns an error. func TestBucket_ForEach_Closed(t *testing.T) { - withOpenDB(func(db *DB, path string) { - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - err := b.ForEach(func(k, v []byte) error { return nil }) - assert.Equal(t, ErrTxClosed, err) - }) + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + tx.Rollback() + err := b.ForEach(func(k, v []byte) error { return nil }) + equals(t, bolt.ErrTxClosed, err) } // Ensure that an error is returned when inserting with an empty key. 
func TestBucket_Put_EmptyKey(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar")) - assert.Equal(t, err, ErrKeyRequired) - err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar")) - assert.Equal(t, err, ErrKeyRequired) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar")) + equals(t, err, bolt.ErrKeyRequired) + err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar")) + equals(t, err, bolt.ErrKeyRequired) + return nil }) } // Ensure that an error is returned when inserting with a key that's too large. func TestBucket_Put_KeyTooLarge(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar")) - assert.Equal(t, err, ErrKeyTooLarge) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar")) + equals(t, err, bolt.ErrKeyTooLarge) + return nil }) } // Ensure a bucket can calculate stats. func TestBucket_Stats(t *testing.T) { - withOpenDB(func(db *DB, path string) { - // Add bucket with fewer keys but one big value. - big_key := []byte("really-big-value") - for i := 0; i < 500; i++ { - db.Update(func(tx *Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - return b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))) - }) - } - db.Update(func(tx *Tx) error { + db := NewTestDB() + defer db.Close() + + // Add bucket with fewer keys but one big value. 
+ big_key := []byte("really-big-value") + for i := 0; i < 500; i++ { + db.Update(func(tx *bolt.Tx) error { b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - return b.Put(big_key, []byte(strings.Repeat("*", 10000))) + return b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))) }) + } + db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) + return b.Put(big_key, []byte(strings.Repeat("*", 10000))) + }) - mustCheck(db) - db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("woojits")) - stats := b.Stats() - assert.Equal(t, 1, stats.BranchPageN, "BranchPageN") - assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 7, stats.LeafPageN, "LeafPageN") - assert.Equal(t, 2, stats.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 501, stats.KeyN, "KeyN") - assert.Equal(t, 2, stats.Depth, "Depth") + db.MustCheck() + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("woojits")) + stats := b.Stats() + equals(t, 1, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 7, stats.LeafPageN) + equals(t, 2, stats.LeafOverflowN) + equals(t, 501, stats.KeyN) + equals(t, 2, stats.Depth) - branchInuse := pageHeaderSize // branch page header - branchInuse += 7 * branchPageElementSize // branch elements - branchInuse += 7 * 3 // branch keys (6 3-byte keys) - assert.Equal(t, branchInuse, stats.BranchInuse, "BranchInuse") + branchInuse := 16 // branch page header + branchInuse += 7 * 16 // branch elements + branchInuse += 7 * 3 // branch keys (7 3-byte keys) + equals(t, branchInuse, stats.BranchInuse) - leafInuse := 7 * pageHeaderSize // leaf page header - leafInuse += 501 * leafPageElementSize // leaf elements - leafInuse += 500*3 + len(big_key) // leaf keys - leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values - assert.Equal(t, leafInuse, stats.LeafInuse, "LeafInuse") + leafInuse := 7 * 16 // leaf page header + leafInuse += 501 * 16 // leaf elements + leafInuse += 500*3 + len(big_key)
// leaf keys + leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values + equals(t, leafInuse, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - assert.Equal(t, 4096, stats.BranchAlloc, "BranchAlloc") - assert.Equal(t, 36864, stats.LeafAlloc, "LeafAlloc") - } + if os.Getpagesize() == 4096 { + // Incompatible page size + equals(t, 4096, stats.BranchAlloc) + equals(t, 36864, stats.LeafAlloc) + } - assert.Equal(t, 1, stats.BucketN, "BucketN") - assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN") - assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse") - return nil - }) + equals(t, 1, stats.BucketN) + equals(t, 0, stats.InlineBucketN) + equals(t, 0, stats.InlineBucketInuse) + return nil }) } @@ -668,179 +670,179 @@ func TestBucket_Stats(t *testing.T) { func TestBucket_Stats_RandomFill(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") - } - if os.Getpagesize() != 4096 { + } else if os.Getpagesize() != 4096 { t.Skip("invalid page size for test") } - withOpenDB(func(db *DB, path string) { - // Add a set of values in random order. It will be the same random - // order so we can maintain consistency between test runs. 
- var count int - r := rand.New(rand.NewSource(42)) - for _, i := range r.Perm(1000) { - db.Update(func(tx *Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - b.FillPercent = 0.9 - for _, j := range r.Perm(100) { - index := (j * 10000) + i - b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")) - count++ - } - return nil - }) - } - mustCheck(db) + db := NewTestDB() + defer db.Close() - db.View(func(tx *Tx) error { - s := tx.Bucket([]byte("woojits")).Stats() - assert.Equal(t, 100000, s.KeyN, "KeyN") - - assert.Equal(t, 98, s.BranchPageN, "BranchPageN") - assert.Equal(t, 0, s.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 130984, s.BranchInuse, "BranchInuse") - assert.Equal(t, 401408, s.BranchAlloc, "BranchAlloc") - - assert.Equal(t, 3412, s.LeafPageN, "LeafPageN") - assert.Equal(t, 0, s.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 4742482, s.LeafInuse, "LeafInuse") - assert.Equal(t, 13975552, s.LeafAlloc, "LeafAlloc") + // Add a set of values in random order. It will be the same random + // order so we can maintain consistency between test runs. + var count int + r := rand.New(rand.NewSource(42)) + for _, i := range r.Perm(1000) { + db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) + b.FillPercent = 0.9 + for _, j := range r.Perm(100) { + index := (j * 10000) + i + b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")) + count++ + } return nil }) + } + db.MustCheck() + + db.View(func(tx *bolt.Tx) error { + s := tx.Bucket([]byte("woojits")).Stats() + equals(t, 100000, s.KeyN) + + equals(t, 98, s.BranchPageN) + equals(t, 0, s.BranchOverflowN) + equals(t, 130984, s.BranchInuse) + equals(t, 401408, s.BranchAlloc) + + equals(t, 3412, s.LeafPageN) + equals(t, 0, s.LeafOverflowN) + equals(t, 4742482, s.LeafInuse) + equals(t, 13975552, s.LeafAlloc) + return nil }) } // Ensure a bucket can calculate stats. 
func TestBucket_Stats_Small(t *testing.T) { + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + // Add a bucket that fits on a single root leaf. + b, err := tx.CreateBucket([]byte("whozawhats")) + ok(t, err) + b.Put([]byte("foo"), []byte("bar")) - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - // Add a bucket that fits on a single root leaf. - b, err := tx.CreateBucket([]byte("whozawhats")) - assert.NoError(t, err) - b.Put([]byte("foo"), []byte("bar")) - - return nil - }) - mustCheck(db) - db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - assert.Equal(t, 0, stats.BranchPageN, "BranchPageN") - assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 0, stats.LeafPageN, "LeafPageN") - assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 1, stats.KeyN, "KeyN") - assert.Equal(t, 1, stats.Depth, "Depth") - assert.Equal(t, 0, stats.BranchInuse, "BranchInuse") - assert.Equal(t, 0, stats.LeafInuse, "LeafInuse") - if os.Getpagesize() == 4096 { - // Incompatible page size - assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc") - assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc") - } - assert.Equal(t, 1, stats.BucketN, "BucketN") - assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN") - assert.Equal(t, pageHeaderSize+leafPageElementSize+6, stats.InlineBucketInuse, "InlineBucketInuse") - return nil - }) + return nil + }) + db.MustCheck() + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("whozawhats")) + stats := b.Stats() + equals(t, 0, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 0, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 1, stats.KeyN) + equals(t, 1, stats.Depth) + equals(t, 0, stats.BranchInuse) + equals(t, 0, stats.LeafInuse) + if os.Getpagesize() == 4096 { + // Incompatible page size + equals(t, 0, stats.BranchAlloc) + equals(t, 0, stats.LeafAlloc) + } + equals(t, 1, 
stats.BucketN) + equals(t, 1, stats.InlineBucketN) + equals(t, 16+16+6, stats.InlineBucketInuse) + return nil }) } func TestBucket_Stats_EmptyBucket(t *testing.T) { + db := NewTestDB() + defer db.Close() - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - // Add a bucket that fits on a single root leaf. - _, err := tx.CreateBucket([]byte("whozawhats")) - assert.NoError(t, err) - return nil - }) - mustCheck(db) - db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - assert.Equal(t, 0, stats.BranchPageN, "BranchPageN") - assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 0, stats.LeafPageN, "LeafPageN") - assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 0, stats.KeyN, "KeyN") - assert.Equal(t, 1, stats.Depth, "Depth") - assert.Equal(t, 0, stats.BranchInuse, "BranchInuse") - assert.Equal(t, 0, stats.LeafInuse, "LeafInuse") - if os.Getpagesize() == 4096 { - // Incompatible page size - assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc") - assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc") - } - assert.Equal(t, 1, stats.BucketN, "BucketN") - assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN") - assert.Equal(t, pageHeaderSize, stats.InlineBucketInuse, "InlineBucketInuse") - return nil - }) + db.Update(func(tx *bolt.Tx) error { + // Add a bucket that fits on a single root leaf. 
+ _, err := tx.CreateBucket([]byte("whozawhats")) + ok(t, err) + return nil + }) + db.MustCheck() + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("whozawhats")) + stats := b.Stats() + equals(t, 0, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 0, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 0, stats.KeyN) + equals(t, 1, stats.Depth) + equals(t, 0, stats.BranchInuse) + equals(t, 0, stats.LeafInuse) + if os.Getpagesize() == 4096 { + // Incompatible page size + equals(t, 0, stats.BranchAlloc) + equals(t, 0, stats.LeafAlloc) + } + equals(t, 1, stats.BucketN) + equals(t, 1, stats.InlineBucketN) + equals(t, 16, stats.InlineBucketInuse) + return nil }) } // Ensure a bucket can calculate stats. func TestBucket_Stats_Nested(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - b, err := tx.CreateBucket([]byte("foo")) - assert.NoError(t, err) - for i := 0; i < 100; i++ { - b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))) - } - bar, err := b.CreateBucket([]byte("bar")) - assert.NoError(t, err) - for i := 0; i < 10; i++ { - bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) - } - baz, err := bar.CreateBucket([]byte("baz")) - assert.NoError(t, err) - for i := 0; i < 10; i++ { - baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) - } - return nil - }) + db := NewTestDB() + defer db.Close() - mustCheck(db) + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("foo")) + ok(t, err) + for i := 0; i < 100; i++ { + b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))) + } + bar, err := b.CreateBucket([]byte("bar")) + ok(t, err) + for i := 0; i < 10; i++ { + bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) + } + baz, err := bar.CreateBucket([]byte("baz")) + ok(t, err) + for i := 0; i < 10; i++ { + baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) + } + return nil + }) - db.View(func(tx *Tx) error { - b := 
tx.Bucket([]byte("foo")) - stats := b.Stats() - assert.Equal(t, 0, stats.BranchPageN, "BranchPageN") - assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 2, stats.LeafPageN, "LeafPageN") - assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 122, stats.KeyN, "KeyN") - assert.Equal(t, 3, stats.Depth, "Depth") - assert.Equal(t, 0, stats.BranchInuse, "BranchInuse") + db.MustCheck() - foo := pageHeaderSize // foo - foo += 101 * leafPageElementSize // foo leaf elements - foo += 100*2 + 100*2 // foo leaf key/values - foo += 3 + bucketHeaderSize // foo -> bar key/value + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("foo")) + stats := b.Stats() + equals(t, 0, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 2, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 122, stats.KeyN) + equals(t, 3, stats.Depth) + equals(t, 0, stats.BranchInuse) - bar := pageHeaderSize // bar - bar += 11 * leafPageElementSize // bar leaf elements - bar += 10 + 10 // bar leaf key/values - bar += 3 + bucketHeaderSize // bar -> baz key/value + foo := 16 // foo (pghdr) + foo += 101 * 16 // foo leaf elements + foo += 100*2 + 100*2 // foo leaf key/values + foo += 3 + 16 // foo -> bar key/value - baz := pageHeaderSize // baz (inline) - baz += 10 * leafPageElementSize // baz leaf elements - baz += 10 + 10 // baz leaf key/values + bar := 16 // bar (pghdr) + bar += 11 * 16 // bar leaf elements + bar += 10 + 10 // bar leaf key/values + bar += 3 + 16 // bar -> baz key/value - assert.Equal(t, foo+bar+baz, stats.LeafInuse, "LeafInuse") - if os.Getpagesize() == 4096 { - // Incompatible page size - assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc") - assert.Equal(t, 8192, stats.LeafAlloc, "LeafAlloc") - } - assert.Equal(t, 3, stats.BucketN, "BucketN") - assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN") - assert.Equal(t, baz, stats.InlineBucketInuse, "InlineBucketInuse") - return nil - }) + baz := 16 // baz 
(inline) (pghdr) + baz += 10 * 16 // baz leaf elements + baz += 10 + 10 // baz leaf key/values + + equals(t, foo+bar+baz, stats.LeafInuse) + if os.Getpagesize() == 4096 { + // Incompatible page size + equals(t, 0, stats.BranchAlloc) + equals(t, 8192, stats.LeafAlloc) + } + equals(t, 3, stats.BucketN) + equals(t, 1, stats.InlineBucketN) + equals(t, baz, stats.InlineBucketInuse) + return nil }) } @@ -850,42 +852,43 @@ func TestBucket_Stats_Large(t *testing.T) { t.Skip("skipping test in short mode.") } - withOpenDB(func(db *DB, path string) { - var index int - for i := 0; i < 100; i++ { - db.Update(func(tx *Tx) error { - // Add bucket with lots of keys. - b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) - for i := 0; i < 1000; i++ { - b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))) - index++ - } - return nil - }) - } - mustCheck(db) + db := NewTestDB() + defer db.Close() - db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - stats := b.Stats() - assert.Equal(t, 13, stats.BranchPageN, "BranchPageN") - assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 1196, stats.LeafPageN, "LeafPageN") - assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 100000, stats.KeyN, "KeyN") - assert.Equal(t, 3, stats.Depth, "Depth") - assert.Equal(t, 25257, stats.BranchInuse, "BranchInuse") - assert.Equal(t, 2596916, stats.LeafInuse, "LeafInuse") - if os.Getpagesize() == 4096 { - // Incompatible page size - assert.Equal(t, 53248, stats.BranchAlloc, "BranchAlloc") - assert.Equal(t, 4898816, stats.LeafAlloc, "LeafAlloc") + var index int + for i := 0; i < 100; i++ { + db.Update(func(tx *bolt.Tx) error { + // Add bucket with lots of keys. 
+ b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) + for i := 0; i < 1000; i++ { + b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))) + index++ } - assert.Equal(t, 1, stats.BucketN, "BucketN") - assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN") - assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse") return nil }) + } + db.MustCheck() + + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + stats := b.Stats() + equals(t, 13, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 1196, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 100000, stats.KeyN) + equals(t, 3, stats.Depth) + equals(t, 25257, stats.BranchInuse) + equals(t, 2596916, stats.LeafInuse) + if os.Getpagesize() == 4096 { + // Incompatible page size + equals(t, 53248, stats.BranchAlloc) + equals(t, 4898816, stats.LeafAlloc) + } + equals(t, 1, stats.BucketN) + equals(t, 0, stats.InlineBucketN) + equals(t, 0, stats.InlineBucketInuse) + return nil }) } @@ -897,37 +900,39 @@ func TestBucket_Put_Single(t *testing.T) { index := 0 f := func(items testdata) bool { - withOpenDB(func(db *DB, path string) { - m := make(map[string][]byte) + db := NewTestDB() + defer db.Close() - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - for _, item := range items { - db.Update(func(tx *Tx) error { - if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { - panic("put error: " + err.Error()) - } - m[string(item.Key)] = item.Value - return nil - }) + m := make(map[string][]byte) - // Verify all key/values so far. 
- db.View(func(tx *Tx) error { - i := 0 - for k, v := range m { - value := tx.Bucket([]byte("widgets")).Get([]byte(k)) - if !bytes.Equal(value, v) { - t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v) - copyAndFailNow(t, db) - } - i++ - } - return nil - }) - } + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err }) + for _, item := range items { + db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { + panic("put error: " + err.Error()) + } + m[string(item.Key)] = item.Value + return nil + }) + + // Verify all key/values so far. + db.View(func(tx *bolt.Tx) error { + i := 0 + for k, v := range m { + value := tx.Bucket([]byte("widgets")).Get([]byte(k)) + if !bytes.Equal(value, v) { + t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v) + db.CopyTempFile() + t.FailNow() + } + i++ + } + return nil + }) + } index++ return true @@ -944,32 +949,33 @@ func TestBucket_Put_Multiple(t *testing.T) { } f := func(items testdata) bool { - withOpenDB(func(db *DB, path string) { - // Bulk insert all values. - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - err := db.Update(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - assert.NoError(t, b.Put(item.Key, item.Value)) - } - return nil - }) - assert.NoError(t, err) + db := NewTestDB() + defer db.Close() + // Bulk insert all values. + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + ok(t, b.Put(item.Key, item.Value)) + } + return nil + }) + ok(t, err) - // Verify all items exist. 
- db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - value := b.Get(item.Key) - if !assert.Equal(t, item.Value, value) { - copyAndFailNow(t, db) - } + // Verify all items exist. + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + value := b.Get(item.Key) + if !bytes.Equal(item.Value, value) { + db.CopyTempFile() + t.Fatalf("exp=%x; got=%x", item.Value, value) } - return nil - }) + } + return nil }) return true } @@ -985,37 +991,37 @@ func TestBucket_Delete_Quick(t *testing.T) { } f := func(items testdata) bool { - withOpenDB(func(db *DB, path string) { - // Bulk insert all values. - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - err := db.Update(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - assert.NoError(t, b.Put(item.Key, item.Value)) - } - return nil - }) - assert.NoError(t, err) - - // Remove items one at a time and check consistency. + db := NewTestDB() + defer db.Close() + // Bulk insert all values. + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) for _, item := range items { - err := db.Update(func(tx *Tx) error { - return tx.Bucket([]byte("widgets")).Delete(item.Key) - }) - assert.NoError(t, err) + ok(t, b.Put(item.Key, item.Value)) } + return nil + }) + ok(t, err) - // Anything before our deletion index should be nil. - db.View(func(tx *Tx) error { - tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) - return nil - }) + // Remove items one at a time and check consistency. 
+ for _, item := range items { + err := db.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Delete(item.Key) + }) + ok(t, err) + } + + // Anything before our deletion index should be nil. + db.View(func(tx *bolt.Tx) error { + tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) return nil }) + return nil }) return true } @@ -1026,12 +1032,12 @@ func TestBucket_Delete_Quick(t *testing.T) { func ExampleBucket_Put() { // Open the database. - db, _ := Open(tempfile(), 0666, nil) + db, _ := bolt.Open(tempfile(), 0666, nil) defer os.Remove(db.Path()) defer db.Close() // Start a write transaction. - db.Update(func(tx *Tx) error { + db.Update(func(tx *bolt.Tx) error { // Create a bucket. tx.CreateBucket([]byte("widgets")) @@ -1041,7 +1047,7 @@ func ExampleBucket_Put() { }) // Read value back in a different read-only transaction. - db.View(func(tx *Tx) error { + db.View(func(tx *bolt.Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) fmt.Printf("The value of 'foo' is: %s\n", value) return nil @@ -1053,12 +1059,12 @@ func ExampleBucket_Put() { func ExampleBucket_Delete() { // Open the database. - db, _ := Open(tempfile(), 0666, nil) + db, _ := bolt.Open(tempfile(), 0666, nil) defer os.Remove(db.Path()) defer db.Close() // Start a write transaction. - db.Update(func(tx *Tx) error { + db.Update(func(tx *bolt.Tx) error { // Create a bucket. tx.CreateBucket([]byte("widgets")) b := tx.Bucket([]byte("widgets")) @@ -1073,12 +1079,12 @@ func ExampleBucket_Delete() { }) // Delete the key in a different write transaction. - db.Update(func(tx *Tx) error { + db.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("widgets")).Delete([]byte("foo")) }) // Retrieve the key again. 
- db.View(func(tx *Tx) error { + db.View(func(tx *bolt.Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) if value == nil { fmt.Printf("The value of 'foo' is now: nil\n") @@ -1093,12 +1099,12 @@ func ExampleBucket_Delete() { func ExampleBucket_ForEach() { // Open the database. - db, _ := Open(tempfile(), 0666, nil) + db, _ := bolt.Open(tempfile(), 0666, nil) defer os.Remove(db.Path()) defer db.Close() // Insert data into a bucket. - db.Update(func(tx *Tx) error { + db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("animals")) b := tx.Bucket([]byte("animals")) b.Put([]byte("dog"), []byte("fun")) diff --git a/cmd/bolt/bench.go b/cmd/bolt/bench.go index b275542..91af960 100644 --- a/cmd/bolt/bench.go +++ b/cmd/bolt/bench.go @@ -283,7 +283,7 @@ func benchStartProfiling(options *BenchOptions) { if options.CPUProfile != "" { cpuprofile, err = os.Create(options.CPUProfile) if err != nil { - fatal("bench: could not create cpu profile %q: %v", options.CPUProfile, err) + fatalf("bench: could not create cpu profile %q: %v", options.CPUProfile, err) } pprof.StartCPUProfile(cpuprofile) } @@ -292,7 +292,7 @@ func benchStartProfiling(options *BenchOptions) { if options.MemProfile != "" { memprofile, err = os.Create(options.MemProfile) if err != nil { - fatal("bench: could not create memory profile %q: %v", options.MemProfile, err) + fatalf("bench: could not create memory profile %q: %v", options.MemProfile, err) } runtime.MemProfileRate = 4096 } @@ -301,7 +301,7 @@ func benchStartProfiling(options *BenchOptions) { if options.BlockProfile != "" { blockprofile, err = os.Create(options.BlockProfile) if err != nil { - fatal("bench: could not create block profile %q: %v", options.BlockProfile, err) + fatalf("bench: could not create block profile %q: %v", options.BlockProfile, err) } runtime.SetBlockProfileRate(1) } diff --git a/cmd/bolt/buckets_test.go b/cmd/bolt/buckets_test.go index 27ee619..d5050fd 100644 --- a/cmd/bolt/buckets_test.go +++ 
b/cmd/bolt/buckets_test.go @@ -5,7 +5,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a list of buckets can be retrieved. @@ -20,7 +19,7 @@ func TestBuckets(t *testing.T) { }) db.Close() output := run("buckets", path) - assert.Equal(t, "whatchits\nwidgets\nwoojits", output) + equals(t, "whatchits\nwidgets\nwoojits", output) }) } @@ -28,5 +27,5 @@ func TestBuckets(t *testing.T) { func TestBucketsDBNotFound(t *testing.T) { SetTestMode(true) output := run("buckets", "no/such/db") - assert.Equal(t, "stat no/such/db: no such file or directory", output) + equals(t, "stat no/such/db: no such file or directory", output) } diff --git a/cmd/bolt/export.go b/cmd/bolt/export.go index 2689f32..9a0b112 100644 --- a/cmd/bolt/export.go +++ b/cmd/bolt/export.go @@ -42,7 +42,7 @@ func Export(path string) { // Encode all buckets into JSON. output, err := json.Marshal(root) if err != nil { - return fmt.Errorf("encode: ", err) + return fmt.Errorf("encode: %s", err) } print(string(output)) return nil diff --git a/cmd/bolt/export_test.go b/cmd/bolt/export_test.go index 13f57d1..d98403c 100644 --- a/cmd/bolt/export_test.go +++ b/cmd/bolt/export_test.go @@ -5,7 +5,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a database can be exported. 
@@ -32,7 +31,7 @@ func TestExport(t *testing.T) { }) db.Close() output := run("export", path) - assert.Equal(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output) + equals(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output) }) } @@ -40,5 +39,5 @@ func TestExport(t *testing.T) { func TestExport_NotFound(t *testing.T) { SetTestMode(true) output := run("export", "no/such/db") - assert.Equal(t, "stat no/such/db: no such file or directory", output) + equals(t, "stat no/such/db: no such file or directory", output) } diff --git a/cmd/bolt/get_test.go b/cmd/bolt/get_test.go index 7b7c3a0..8acd0f4 100644 --- a/cmd/bolt/get_test.go +++ b/cmd/bolt/get_test.go @@ -5,7 +5,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a value can be retrieved from the CLI. @@ -19,7 +18,7 @@ func TestGet(t *testing.T) { }) db.Close() output := run("get", path, "widgets", "foo") - assert.Equal(t, "bar", output) + equals(t, "bar", output) }) } @@ -27,7 +26,7 @@ func TestGet(t *testing.T) { func TestGetDBNotFound(t *testing.T) { SetTestMode(true) output := run("get", "no/such/db", "widgets", "foo") - assert.Equal(t, "stat no/such/db: no such file or directory", output) + equals(t, "stat no/such/db: no such file or directory", output) } // Ensure that an error is reported if the bucket is not found. 
@@ -36,7 +35,7 @@ func TestGetBucketNotFound(t *testing.T) { open(func(db *bolt.DB, path string) { db.Close() output := run("get", path, "widgets", "foo") - assert.Equal(t, "bucket not found: widgets", output) + equals(t, "bucket not found: widgets", output) }) } @@ -50,6 +49,6 @@ func TestGetKeyNotFound(t *testing.T) { }) db.Close() output := run("get", path, "widgets", "foo") - assert.Equal(t, "key not found: foo", output) + equals(t, "key not found: foo", output) }) } diff --git a/cmd/bolt/import_test.go b/cmd/bolt/import_test.go index 3d4f275..086bf03 100644 --- a/cmd/bolt/import_test.go +++ b/cmd/bolt/import_test.go @@ -6,7 +6,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a database can be imported. @@ -15,32 +14,30 @@ func TestImport(t *testing.T) { // Write input file. input := tempfile() - assert.NoError(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600)) + ok(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600)) // Import database. path := tempfile() output := run("import", path, "--input", input) - assert.Equal(t, ``, output) + equals(t, ``, output) // Open database and verify contents. 
db, err := bolt.Open(path, 0600, nil) - assert.NoError(t, err) + ok(t, err) db.View(func(tx *bolt.Tx) error { - assert.NotNil(t, tx.Bucket([]byte("empty"))) + assert(t, tx.Bucket([]byte("empty")) != nil, "") b := tx.Bucket([]byte("widgets")) - if assert.NotNil(t, b) { - assert.Equal(t, []byte("0000"), b.Get([]byte("foo"))) - assert.Equal(t, []byte(""), b.Get([]byte("bar"))) - } + assert(t, b != nil, "") + equals(t, []byte("0000"), b.Get([]byte("foo"))) + equals(t, []byte(""), b.Get([]byte("bar"))) b = tx.Bucket([]byte("woojits")) - if assert.NotNil(t, b) { - assert.Equal(t, []byte("XXXX"), b.Get([]byte("baz"))) + assert(t, b != nil, "") + equals(t, []byte("XXXX"), b.Get([]byte("baz"))) - b = b.Bucket([]byte("woojits/subbucket")) - assert.Equal(t, []byte("A"), b.Get([]byte("bat"))) - } + b = b.Bucket([]byte("woojits/subbucket")) + equals(t, []byte("A"), b.Get([]byte("bat"))) return nil }) @@ -51,5 +48,5 @@ func TestImport(t *testing.T) { func TestImport_NotFound(t *testing.T) { SetTestMode(true) output := run("import", "path/to/db", "--input", "no/such/file") - assert.Equal(t, "open no/such/file: no such file or directory", output) + equals(t, "open no/such/file: no such file or directory", output) } diff --git a/cmd/bolt/info_test.go b/cmd/bolt/info_test.go index 668cc61..dab74f6 100644 --- a/cmd/bolt/info_test.go +++ b/cmd/bolt/info_test.go @@ -5,7 +5,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a database info can be printed. 
@@ -20,7 +19,7 @@ func TestInfo(t *testing.T) { }) db.Close() output := run("info", path) - assert.Equal(t, `Page Size: 4096`, output) + equals(t, `Page Size: 4096`, output) }) } @@ -28,5 +27,5 @@ func TestInfo(t *testing.T) { func TestInfo_NotFound(t *testing.T) { SetTestMode(true) output := run("info", "no/such/db") - assert.Equal(t, "stat no/such/db: no such file or directory", output) + equals(t, "stat no/such/db: no such file or directory", output) } diff --git a/cmd/bolt/keys_test.go b/cmd/bolt/keys_test.go index 2b5a9a0..0cc4e0c 100644 --- a/cmd/bolt/keys_test.go +++ b/cmd/bolt/keys_test.go @@ -5,7 +5,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a list of keys can be retrieved for a given bucket. @@ -21,7 +20,7 @@ func TestKeys(t *testing.T) { }) db.Close() output := run("keys", path, "widgets") - assert.Equal(t, "0001\n0002\n0003", output) + equals(t, "0001\n0002\n0003", output) }) } @@ -29,7 +28,7 @@ func TestKeys(t *testing.T) { func TestKeysDBNotFound(t *testing.T) { SetTestMode(true) output := run("keys", "no/such/db", "widgets") - assert.Equal(t, "stat no/such/db: no such file or directory", output) + equals(t, "stat no/such/db: no such file or directory", output) } // Ensure that an error is reported if the bucket is not found. @@ -38,6 +37,6 @@ func TestKeysBucketNotFound(t *testing.T) { open(func(db *bolt.DB, path string) { db.Close() output := run("keys", path, "widgets") - assert.Equal(t, "bucket not found: widgets", output) + equals(t, "bucket not found: widgets", output) }) } diff --git a/cmd/bolt/main_test.go b/cmd/bolt/main_test.go index 0614d43..4448d6e 100644 --- a/cmd/bolt/main_test.go +++ b/cmd/bolt/main_test.go @@ -1,9 +1,14 @@ package main_test import ( + "fmt" "io/ioutil" "os" + "path/filepath" + "reflect" + "runtime" "strings" + "testing" "github.com/boltdb/bolt" . 
"github.com/boltdb/bolt/cmd/bolt" @@ -35,3 +40,30 @@ func tempfile() string { os.Remove(f.Name()) return f.Name() } + +// assert fails the test if the condition is false. +func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { + if !condition { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) + tb.FailNow() + } +} + +// ok fails the test if an err is not nil. +func ok(tb testing.TB, err error) { + if err != nil { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) + tb.FailNow() + } +} + +// equals fails the test if exp is not equal to act. +func equals(tb testing.TB, exp, act interface{}) { + if !reflect.DeepEqual(exp, act) { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + tb.FailNow() + } +} diff --git a/cmd/bolt/stats_test.go b/cmd/bolt/stats_test.go index 2ad5d51..44ed434 100644 --- a/cmd/bolt/stats_test.go +++ b/cmd/bolt/stats_test.go @@ -7,7 +7,6 @@ import ( "github.com/boltdb/bolt" . 
"github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) func TestStats(t *testing.T) { @@ -40,7 +39,7 @@ func TestStats(t *testing.T) { }) db.Close() output := run("stats", path, "b") - assert.Equal(t, "Aggregate statistics for 2 buckets\n\n"+ + equals(t, "Aggregate statistics for 2 buckets\n\n"+ "Page count statistics\n"+ "\tNumber of logical branch pages: 0\n"+ "\tNumber of physical branch overflow pages: 0\n"+ diff --git a/cursor_test.go b/cursor_test.go index 470860d..6957a29 100644 --- a/cursor_test.go +++ b/cursor_test.go @@ -1,4 +1,4 @@ -package bolt +package bolt_test import ( "bytes" @@ -7,103 +7,104 @@ import ( "testing" "testing/quick" - "github.com/stretchr/testify/assert" + "github.com/boltdb/bolt" ) // Ensure that a cursor can return a reference to the bucket that created it. func TestCursor_Bucket(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - c := b.Cursor() - assert.Equal(t, b, c.Bucket()) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucket([]byte("widgets")) + c := b.Cursor() + equals(t, b, c.Bucket()) + return nil }) } // Ensure that a Tx cursor can seek to the appropriate keys. 
func TestCursor_Seek(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) - assert.NoError(t, b.Put([]byte("foo"), []byte("0001"))) - assert.NoError(t, b.Put([]byte("bar"), []byte("0002"))) - assert.NoError(t, b.Put([]byte("baz"), []byte("0003"))) - _, err = b.CreateBucket([]byte("bkt")) - assert.NoError(t, err) - return nil - }) - db.View(func(tx *Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + ok(t, b.Put([]byte("foo"), []byte("0001"))) + ok(t, b.Put([]byte("bar"), []byte("0002"))) + ok(t, b.Put([]byte("baz"), []byte("0003"))) + _, err = b.CreateBucket([]byte("bkt")) + ok(t, err) + return nil + }) + db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() - // Exact match should go to the key. - k, v := c.Seek([]byte("bar")) - assert.Equal(t, []byte("bar"), k) - assert.Equal(t, []byte("0002"), v) + // Exact match should go to the key. + k, v := c.Seek([]byte("bar")) + equals(t, []byte("bar"), k) + equals(t, []byte("0002"), v) - // Inexact match should go to the next key. - k, v = c.Seek([]byte("bas")) - assert.Equal(t, []byte("baz"), k) - assert.Equal(t, []byte("0003"), v) + // Inexact match should go to the next key. + k, v = c.Seek([]byte("bas")) + equals(t, []byte("baz"), k) + equals(t, []byte("0003"), v) - // Low key should go to the first key. - k, v = c.Seek([]byte("")) - assert.Equal(t, []byte("bar"), k) - assert.Equal(t, []byte("0002"), v) + // Low key should go to the first key. + k, v = c.Seek([]byte("")) + equals(t, []byte("bar"), k) + equals(t, []byte("0002"), v) - // High key should return no key. - k, v = c.Seek([]byte("zzz")) - assert.Nil(t, k) - assert.Nil(t, v) + // High key should return no key. 
+ k, v = c.Seek([]byte("zzz")) + assert(t, k == nil, "") + assert(t, v == nil, "") - // Buckets should return their key but no value. - k, v = c.Seek([]byte("bkt")) - assert.Equal(t, []byte("bkt"), k) - assert.Nil(t, v) + // Buckets should return their key but no value. + k, v = c.Seek([]byte("bkt")) + equals(t, []byte("bkt"), k) + assert(t, v == nil, "") - return nil - }) + return nil }) } func TestCursor_Delete(t *testing.T) { - withOpenDB(func(db *DB, path string) { - var count = 1000 + db := NewTestDB() + defer db.Close() - // Insert every other key between 0 and $count. - db.Update(func(tx *Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - for i := 0; i < count; i += 1 { - k := make([]byte, 8) - binary.BigEndian.PutUint64(k, uint64(i)) - b.Put(k, make([]byte, 100)) + var count = 1000 + + // Insert every other key between 0 and $count. + db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucket([]byte("widgets")) + for i := 0; i < count; i += 1 { + k := make([]byte, 8) + binary.BigEndian.PutUint64(k, uint64(i)) + b.Put(k, make([]byte, 100)) + } + b.CreateBucket([]byte("sub")) + return nil + }) + + db.Update(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + bound := make([]byte, 8) + binary.BigEndian.PutUint64(bound, uint64(count/2)) + for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { + if err := c.Delete(); err != nil { + return err } - b.CreateBucket([]byte("sub")) - return nil - }) + } + c.Seek([]byte("sub")) + err := c.Delete() + equals(t, err, bolt.ErrIncompatibleValue) + return nil + }) - db.Update(func(tx *Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - bound := make([]byte, 8) - binary.BigEndian.PutUint64(bound, uint64(count/2)) - for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { - if err := c.Delete(); err != nil { - return err - } - } - c.Seek([]byte("sub")) - err := c.Delete() - assert.Equal(t, err, ErrIncompatibleValue) - return nil - }) - - 
db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - assert.Equal(t, b.Stats().KeyN, count/2+1) - return nil - }) + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + equals(t, b.Stats().KeyN, count/2+1) + return nil }) } @@ -113,216 +114,223 @@ func TestCursor_Delete(t *testing.T) { // // Related: https://github.com/boltdb/bolt/pull/187 func TestCursor_Seek_Large(t *testing.T) { - withOpenDB(func(db *DB, path string) { - var count = 10000 + db := NewTestDB() + defer db.Close() - // Insert every other key between 0 and $count. - db.Update(func(tx *Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - for i := 0; i < count; i += 100 { - for j := i; j < i+100; j += 2 { - k := make([]byte, 8) - binary.BigEndian.PutUint64(k, uint64(j)) - b.Put(k, make([]byte, 100)) - } + var count = 10000 + + // Insert every other key between 0 and $count. + db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucket([]byte("widgets")) + for i := 0; i < count; i += 100 { + for j := i; j < i+100; j += 2 { + k := make([]byte, 8) + binary.BigEndian.PutUint64(k, uint64(j)) + b.Put(k, make([]byte, 100)) } - return nil - }) + } + return nil + }) - db.View(func(tx *Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - for i := 0; i < count; i++ { - seek := make([]byte, 8) - binary.BigEndian.PutUint64(seek, uint64(i)) + db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + for i := 0; i < count; i++ { + seek := make([]byte, 8) + binary.BigEndian.PutUint64(seek, uint64(i)) - k, _ := c.Seek(seek) + k, _ := c.Seek(seek) - // The last seek is beyond the end of the the range so - // it should return nil. - if i == count-1 { - assert.Nil(t, k) - continue - } - - // Otherwise we should seek to the exact key or the next key. 
- num := binary.BigEndian.Uint64(k) - if i%2 == 0 { - assert.Equal(t, uint64(i), num) - } else { - assert.Equal(t, uint64(i+1), num) - } + // The last seek is beyond the end of the the range so + // it should return nil. + if i == count-1 { + assert(t, k == nil, "") + continue } - return nil - }) + // Otherwise we should seek to the exact key or the next key. + num := binary.BigEndian.Uint64(k) + if i%2 == 0 { + equals(t, uint64(i), num) + } else { + equals(t, uint64(i+1), num) + } + } + + return nil }) } // Ensure that a cursor can iterate over an empty bucket without error. func TestCursor_EmptyBucket(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - db.View(func(tx *Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.First() - assert.Nil(t, k) - assert.Nil(t, v) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + k, v := c.First() + assert(t, k == nil, "") + assert(t, v == nil, "") + return nil }) } // Ensure that a Tx cursor can reverse iterate over an empty bucket without error. 
func TestCursor_EmptyBucketReverse(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - db.View(func(tx *Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.Last() - assert.Nil(t, k) - assert.Nil(t, v) - return nil - }) + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + db.View(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("widgets")).Cursor() + k, v := c.Last() + assert(t, k == nil, "") + assert(t, v == nil, "") + return nil }) } // Ensure that a Tx cursor can iterate over a single root with a couple elements. func TestCursor_Iterate_Leaf(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) - return nil - }) - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() + db := NewTestDB() + defer db.Close() - k, v := c.First() - assert.Equal(t, string(k), "bar") - assert.Equal(t, v, []byte{1}) - - k, v = c.Next() - assert.Equal(t, string(k), "baz") - assert.Equal(t, v, []byte{}) - - k, v = c.Next() - assert.Equal(t, string(k), "foo") - assert.Equal(t, v, []byte{0}) - - k, v = c.Next() - assert.Nil(t, k) - assert.Nil(t, v) - - k, v = c.Next() - assert.Nil(t, k) - assert.Nil(t, v) - - tx.Rollback() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) + tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) + return nil }) + tx, _ := db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + + k, v := c.First() + equals(t, string(k), "bar") + 
equals(t, v, []byte{1}) + + k, v = c.Next() + equals(t, string(k), "baz") + equals(t, v, []byte{}) + + k, v = c.Next() + equals(t, string(k), "foo") + equals(t, v, []byte{0}) + + k, v = c.Next() + assert(t, k == nil, "") + assert(t, v == nil, "") + + k, v = c.Next() + assert(t, k == nil, "") + assert(t, v == nil, "") + + tx.Rollback() } // Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. func TestCursor_LeafRootReverse(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) - return nil - }) - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() + db := NewTestDB() + defer db.Close() - k, v := c.Last() - assert.Equal(t, string(k), "foo") - assert.Equal(t, v, []byte{0}) - - k, v = c.Prev() - assert.Equal(t, string(k), "baz") - assert.Equal(t, v, []byte{}) - - k, v = c.Prev() - assert.Equal(t, string(k), "bar") - assert.Equal(t, v, []byte{1}) - - k, v = c.Prev() - assert.Nil(t, k) - assert.Nil(t, v) - - k, v = c.Prev() - assert.Nil(t, k) - assert.Nil(t, v) - - tx.Rollback() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) + tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) + return nil }) + tx, _ := db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + + k, v := c.Last() + equals(t, string(k), "foo") + equals(t, v, []byte{0}) + + k, v = c.Prev() + equals(t, string(k), "baz") + equals(t, v, []byte{}) + + k, v = c.Prev() + equals(t, string(k), "bar") + equals(t, v, []byte{1}) + + k, v = c.Prev() + assert(t, k == nil, "") + assert(t, v == nil, "") + + k, v = c.Prev() + assert(t, k == nil, "") + 
assert(t, v == nil, "") + + tx.Rollback() } // Ensure that a Tx cursor can restart from the beginning. func TestCursor_Restart(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{}) - return nil - }) + db := NewTestDB() + defer db.Close() - tx, _ := db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - - k, _ := c.First() - assert.Equal(t, string(k), "bar") - - k, _ = c.Next() - assert.Equal(t, string(k), "foo") - - k, _ = c.First() - assert.Equal(t, string(k), "bar") - - k, _ = c.Next() - assert.Equal(t, string(k), "foo") - - tx.Rollback() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{}) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{}) + return nil }) + + tx, _ := db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + + k, _ := c.First() + equals(t, string(k), "bar") + + k, _ = c.Next() + equals(t, string(k), "foo") + + k, _ = c.First() + equals(t, string(k), "bar") + + k, _ = c.Next() + equals(t, string(k), "foo") + + tx.Rollback() } // Ensure that a Tx can iterate over all elements in a bucket. func TestCursor_QuickCheck(t *testing.T) { f := func(items testdata) bool { - withOpenDB(func(db *DB, path string) { - // Bulk insert all values. - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - assert.NoError(t, b.Put(item.Key, item.Value)) - } - assert.NoError(t, tx.Commit()) + db := NewTestDB() + defer db.Close() - // Sort test data. - sort.Sort(items) + // Bulk insert all values. 
+ tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + ok(t, b.Put(item.Key, item.Value)) + } + ok(t, tx.Commit()) + + // Sort test data. + sort.Sort(items) + + // Iterate over all items and check consistency. + var index = 0 + tx, _ = db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { + equals(t, k, items[index].Key) + equals(t, v, items[index].Value) + index++ + } + equals(t, len(items), index) + tx.Rollback() - // Iterate over all items and check consistency. - var index = 0 - tx, _ = db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { - assert.Equal(t, k, items[index].Key) - assert.Equal(t, v, items[index].Value) - index++ - } - assert.Equal(t, len(items), index) - tx.Rollback() - }) return true } if err := quick.Check(f, qconfig()); err != nil { @@ -333,31 +341,33 @@ func TestCursor_QuickCheck(t *testing.T) { // Ensure that a transaction can iterate over all elements in a bucket in reverse. func TestCursor_QuickCheck_Reverse(t *testing.T) { f := func(items testdata) bool { - withOpenDB(func(db *DB, path string) { - // Bulk insert all values. - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - assert.NoError(t, b.Put(item.Key, item.Value)) - } - assert.NoError(t, tx.Commit()) + db := NewTestDB() + defer db.Close() - // Sort test data. - sort.Sort(revtestdata(items)) + // Bulk insert all values. + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + ok(t, b.Put(item.Key, item.Value)) + } + ok(t, tx.Commit()) + + // Sort test data. + sort.Sort(revtestdata(items)) + + // Iterate over all items and check consistency. 
+ var index = 0 + tx, _ = db.Begin(false) + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { + equals(t, k, items[index].Key) + equals(t, v, items[index].Value) + index++ + } + equals(t, len(items), index) + tx.Rollback() - // Iterate over all items and check consistency. - var index = 0 - tx, _ = db.Begin(false) - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { - assert.Equal(t, k, items[index].Key) - assert.Equal(t, v, items[index].Value) - index++ - } - assert.Equal(t, len(items), index) - tx.Rollback() - }) return true } if err := quick.Check(f, qconfig()); err != nil { @@ -367,54 +377,56 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) { // Ensure that a Tx cursor can iterate over subbuckets. func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) - _, err = b.CreateBucket([]byte("foo")) - assert.NoError(t, err) - _, err = b.CreateBucket([]byte("bar")) - assert.NoError(t, err) - _, err = b.CreateBucket([]byte("baz")) - assert.NoError(t, err) - return nil - }) - db.View(func(tx *Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - names = append(names, string(k)) - assert.Nil(t, v) - } - assert.Equal(t, names, []string{"bar", "baz", "foo"}) - return nil - }) + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + _, err = b.CreateBucket([]byte("foo")) + ok(t, err) + _, err = b.CreateBucket([]byte("bar")) + ok(t, err) + _, err = b.CreateBucket([]byte("baz")) + ok(t, err) + return nil + }) + db.View(func(tx *bolt.Tx) error { + var names []string + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.First(); k != 
nil; k, v = c.Next() { + names = append(names, string(k)) + assert(t, v == nil, "") + } + equals(t, names, []string{"bar", "baz", "foo"}) + return nil }) } // Ensure that a Tx cursor can reverse iterate over subbuckets. func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) - _, err = b.CreateBucket([]byte("foo")) - assert.NoError(t, err) - _, err = b.CreateBucket([]byte("bar")) - assert.NoError(t, err) - _, err = b.CreateBucket([]byte("baz")) - assert.NoError(t, err) - return nil - }) - db.View(func(tx *Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil; k, v = c.Prev() { - names = append(names, string(k)) - assert.Nil(t, v) - } - assert.Equal(t, names, []string{"foo", "baz", "bar"}) - return nil - }) + db := NewTestDB() + defer db.Close() + + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + ok(t, err) + _, err = b.CreateBucket([]byte("foo")) + ok(t, err) + _, err = b.CreateBucket([]byte("bar")) + ok(t, err) + _, err = b.CreateBucket([]byte("baz")) + ok(t, err) + return nil + }) + db.View(func(tx *bolt.Tx) error { + var names []string + c := tx.Bucket([]byte("widgets")).Cursor() + for k, v := c.Last(); k != nil; k, v = c.Prev() { + names = append(names, string(k)) + assert(t, v == nil, "") + } + equals(t, names, []string{"foo", "baz", "bar"}) + return nil }) } diff --git a/db.go b/db.go index bb6beef..7364454 100644 --- a/db.go +++ b/db.go @@ -384,8 +384,8 @@ func (db *DB) beginRWTx() (*Tx, error) { // Free any pages associated with closed read-only transactions. 
var minid txid = 0xFFFFFFFFFFFFFFFF for _, t := range db.txs { - if t.id() < minid { - minid = t.id() + if t.meta.txid < minid { + minid = t.meta.txid } } if minid > 0 { diff --git a/db_test.go b/db_test.go index 2063249..e9da776 100644 --- a/db_test.go +++ b/db_test.go @@ -1,4 +1,4 @@ -package bolt +package bolt_test import ( "errors" @@ -12,29 +12,28 @@ import ( "strings" "testing" "time" - "unsafe" - "github.com/stretchr/testify/assert" + "github.com/boltdb/bolt" ) var statsFlag = flag.Bool("stats", false, "show performance stats") // Ensure that opening a database with a bad path returns an error. func TestOpen_BadPath(t *testing.T) { - db, err := Open("", 0666, nil) - assert.Error(t, err) - assert.Nil(t, db) + db, err := bolt.Open("", 0666, nil) + assert(t, err != nil, "err: %s", err) + assert(t, db == nil, "") } // Ensure that a database can be opened without error. func TestOpen(t *testing.T) { - withTempPath(func(path string) { - db, err := Open(path, 0666, nil) - assert.NotNil(t, db) - assert.NoError(t, err) - assert.Equal(t, db.Path(), path) - assert.NoError(t, db.Close()) - }) + path := tempfile() + defer os.Remove(path) + db, err := bolt.Open(path, 0666, nil) + assert(t, db != nil, "") + ok(t, err) + equals(t, db.Path(), path) + ok(t, db.Close()) } // Ensure that opening an already open database file will timeout. @@ -42,21 +41,23 @@ func TestOpen_Timeout(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("timeout not supported on windows") } - withTempPath(func(path string) { - // Open a data file. - db0, err := Open(path, 0666, nil) - assert.NotNil(t, db0) - assert.NoError(t, err) - // Attempt to open the database again. - start := time.Now() - db1, err := Open(path, 0666, &Options{Timeout: 100 * time.Millisecond}) - assert.Nil(t, db1) - assert.Equal(t, ErrTimeout, err) - assert.True(t, time.Since(start) > 100*time.Millisecond) + path := tempfile() + defer os.Remove(path) - db0.Close() - }) + // Open a data file. 
+ db0, err := bolt.Open(path, 0666, nil) + assert(t, db0 != nil, "") + ok(t, err) + + // Attempt to open the database again. + start := time.Now() + db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond}) + assert(t, db1 == nil, "") + equals(t, bolt.ErrTimeout, err) + assert(t, time.Since(start) > 100*time.Millisecond, "") + + db0.Close() } // Ensure that opening an already open database file will wait until its closed. @@ -64,48 +65,51 @@ func TestOpen_Wait(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("timeout not supported on windows") } - withTempPath(func(path string) { - // Open a data file. - db0, err := Open(path, 0666, nil) - assert.NotNil(t, db0) - assert.NoError(t, err) - // Close it in just a bit. - time.AfterFunc(100*time.Millisecond, func() { db0.Close() }) + path := tempfile() + defer os.Remove(path) - // Attempt to open the database again. - start := time.Now() - db1, err := Open(path, 0666, &Options{Timeout: 200 * time.Millisecond}) - assert.NotNil(t, db1) - assert.NoError(t, err) - assert.True(t, time.Since(start) > 100*time.Millisecond) - }) + // Open a data file. + db0, err := bolt.Open(path, 0666, nil) + assert(t, db0 != nil, "") + ok(t, err) + + // Close it in just a bit. + time.AfterFunc(100*time.Millisecond, func() { db0.Close() }) + + // Attempt to open the database again. + start := time.Now() + db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond}) + assert(t, db1 != nil, "") + ok(t, err) + assert(t, time.Since(start) > 100*time.Millisecond, "") } // Ensure that a re-opened database is consistent. 
func TestOpen_Check(t *testing.T) { - withTempPath(func(path string) { - db, err := Open(path, 0666, nil) - assert.NoError(t, err) - assert.NoError(t, db.View(func(tx *Tx) error { return <-tx.Check() })) - db.Close() + path := tempfile() + defer os.Remove(path) - db, err = Open(path, 0666, nil) - assert.NoError(t, err) - assert.NoError(t, db.View(func(tx *Tx) error { return <-tx.Check() })) - db.Close() - }) + db, err := bolt.Open(path, 0666, nil) + ok(t, err) + ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) + db.Close() + + db, err = bolt.Open(path, 0666, nil) + ok(t, err) + ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) + db.Close() } // Ensure that the database returns an error if the file handle cannot be open. func TestDB_Open_FileError(t *testing.T) { - withTempPath(func(path string) { - _, err := Open(path+"/youre-not-my-real-parent", 0666, nil) - if err, _ := err.(*os.PathError); assert.Error(t, err) { - assert.Equal(t, path+"/youre-not-my-real-parent", err.Path) - assert.Equal(t, "open", err.Op) - } - }) + path := tempfile() + defer os.Remove(path) + + _, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil) + assert(t, err.(*os.PathError) != nil, "") + equals(t, path+"/youre-not-my-real-parent", err.(*os.PathError).Path) + equals(t, "open", err.(*os.PathError).Op) } // Ensure that write errors to the meta file handler during initialization are returned. @@ -115,218 +119,227 @@ func TestDB_Open_MetaInitWriteError(t *testing.T) { // Ensure that a database that is too small returns an error. 
func TestDB_Open_FileTooSmall(t *testing.T) { - withTempPath(func(path string) { - db, err := Open(path, 0666, nil) - assert.NoError(t, err) - db.Close() + path := tempfile() + defer os.Remove(path) - // corrupt the database - assert.NoError(t, os.Truncate(path, int64(os.Getpagesize()))) + db, err := bolt.Open(path, 0666, nil) + ok(t, err) + db.Close() - db, err = Open(path, 0666, nil) - assert.Equal(t, errors.New("file size too small"), err) - }) + // corrupt the database + ok(t, os.Truncate(path, int64(os.Getpagesize()))) + + db, err = bolt.Open(path, 0666, nil) + equals(t, errors.New("file size too small"), err) } -// Ensure that corrupt meta0 page errors get returned. -func TestDB_Open_CorruptMeta0(t *testing.T) { - withTempPath(func(path string) { - var m meta - m.magic = magic - m.version = version - m.pageSize = 0x8000 - - // Create a file with bad magic. - b := make([]byte, 0x10000) - p0, p1 := (*page)(unsafe.Pointer(&b[0x0000])), (*page)(unsafe.Pointer(&b[0x8000])) - p0.meta().magic = 0 - p0.meta().version = version - p1.meta().magic = magic - p1.meta().version = version - err := ioutil.WriteFile(path, b, 0666) - assert.NoError(t, err) - - // Open the database. - _, err = Open(path, 0666, nil) - assert.Equal(t, err, errors.New("meta0 error: invalid database")) - }) -} - -// Ensure that a corrupt meta page checksum causes the open to fail. -func TestDB_Open_MetaChecksumError(t *testing.T) { - for i := 0; i < 2; i++ { - withTempPath(func(path string) { - db, err := Open(path, 0600, nil) - pageSize := db.pageSize - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("woojits")) - return err - }) - db.Close() - - // Change a single byte in the meta page. - f, _ := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600) - f.WriteAt([]byte{1}, int64((i*pageSize)+(pageHeaderSize+12))) - f.Sync() - f.Close() - - // Reopen the database. 
- _, err = Open(path, 0600, nil) - if assert.Error(t, err) { - if i == 0 { - assert.Equal(t, "meta0 error: checksum error", err.Error()) - } else { - assert.Equal(t, "meta1 error: checksum error", err.Error()) - } - } - }) - } -} +// TODO(benbjohnson): Test corruption at every byte of the first two pages. // Ensure that a database cannot open a transaction when it's not open. func TestDB_Begin_DatabaseNotOpen(t *testing.T) { - var db DB + var db bolt.DB tx, err := db.Begin(false) - assert.Nil(t, tx) - assert.Equal(t, err, ErrDatabaseNotOpen) + assert(t, tx == nil, "") + equals(t, err, bolt.ErrDatabaseNotOpen) } // Ensure that a read-write transaction can be retrieved. func TestDB_BeginRW(t *testing.T) { - withOpenDB(func(db *DB, path string) { - tx, err := db.Begin(true) - assert.NotNil(t, tx) - assert.NoError(t, err) - assert.Equal(t, tx.DB(), db) - assert.Equal(t, tx.Writable(), true) - assert.NoError(t, tx.Commit()) - }) + db := NewTestDB() + defer db.Close() + tx, err := db.Begin(true) + assert(t, tx != nil, "") + ok(t, err) + assert(t, tx.DB() == db.DB, "") + equals(t, tx.Writable(), true) + ok(t, tx.Commit()) } // Ensure that opening a transaction while the DB is closed returns an error. func TestDB_BeginRW_Closed(t *testing.T) { - var db DB + var db bolt.DB tx, err := db.Begin(true) - assert.Equal(t, err, ErrDatabaseNotOpen) - assert.Nil(t, tx) + equals(t, err, bolt.ErrDatabaseNotOpen) + assert(t, tx == nil, "") } // Ensure a database can provide a transactional block. 
func TestDB_Update(t *testing.T) { - withOpenDB(func(db *DB, path string) { - err := db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("bar")) - b.Put([]byte("baz"), []byte("bat")) - b.Delete([]byte("foo")) - return nil - }) - assert.NoError(t, err) - err = db.View(func(tx *Tx) error { - assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo"))) - assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) - return nil - }) - assert.NoError(t, err) + db := NewTestDB() + defer db.Close() + err := db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + b.Put([]byte("foo"), []byte("bar")) + b.Put([]byte("baz"), []byte("bat")) + b.Delete([]byte("foo")) + return nil }) + ok(t, err) + err = db.View(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") + equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) + return nil + }) + ok(t, err) } // Ensure a closed database returns an error while running a transaction block func TestDB_Update_Closed(t *testing.T) { - var db DB - err := db.Update(func(tx *Tx) error { + var db bolt.DB + err := db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) return nil }) - assert.Equal(t, err, ErrDatabaseNotOpen) + equals(t, err, bolt.ErrDatabaseNotOpen) } // Ensure a panic occurs while trying to commit a managed transaction. 
-func TestDB_Update_ManualCommitAndRollback(t *testing.T) { - var db DB - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - assert.Panics(t, func() { tx.Commit() }) - assert.Panics(t, func() { tx.Rollback() }) +func TestDB_Update_ManualCommit(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var ok bool + db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + ok = true + } + }() + tx.Commit() + }() return nil }) - db.View(func(tx *Tx) error { - assert.Panics(t, func() { tx.Commit() }) - assert.Panics(t, func() { tx.Rollback() }) + assert(t, ok, "expected panic") +} + +// Ensure a panic occurs while trying to rollback a managed transaction. +func TestDB_Update_ManualRollback(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var ok bool + db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + ok = true + } + }() + tx.Rollback() + }() return nil }) + assert(t, ok, "expected panic") +} + +// Ensure a panic occurs while trying to commit a managed transaction. +func TestDB_View_ManualCommit(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var ok bool + db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + ok = true + } + }() + tx.Commit() + }() + return nil + }) + assert(t, ok, "expected panic") +} + +// Ensure a panic occurs while trying to rollback a managed transaction. +func TestDB_View_ManualRollback(t *testing.T) { + db := NewTestDB() + defer db.Close() + + var ok bool + db.Update(func(tx *bolt.Tx) error { + func() { + defer func() { + if r := recover(); r != nil { + ok = true + } + }() + tx.Rollback() + }() + return nil + }) + assert(t, ok, "expected panic") } // Ensure a write transaction that panics does not hold open locks. 
func TestDB_Update_Panic(t *testing.T) { - withOpenDB(func(db *DB, path string) { - func() { - defer func() { - if r := recover(); r != nil { - warn("recover: update", r) - } - }() - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - panic("omg") - }) + db := NewTestDB() + defer db.Close() + + func() { + defer func() { + if r := recover(); r != nil { + t.Log("recover: update", r) + } }() - - // Verify we can update again. - err := db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + panic("omg") }) - assert.NoError(t, err) + }() - // Verify that our change persisted. - err = db.Update(func(tx *Tx) error { - assert.NotNil(t, tx.Bucket([]byte("widgets"))) - return nil - }) + // Verify we can update again. + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) + ok(t, err) + + // Verify that our change persisted. + err = db.Update(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")) != nil, "") + return nil }) } // Ensure a database can return an error through a read-only transactional block. func TestDB_View_Error(t *testing.T) { - withOpenDB(func(db *DB, path string) { - err := db.View(func(tx *Tx) error { - return errors.New("xxx") - }) - assert.Equal(t, errors.New("xxx"), err) + db := NewTestDB() + defer db.Close() + err := db.View(func(tx *bolt.Tx) error { + return errors.New("xxx") }) + equals(t, errors.New("xxx"), err) } // Ensure a read transaction that panics does not hold open locks. 
func TestDB_View_Panic(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + return nil + }) - func() { - defer func() { - if r := recover(); r != nil { - warn("recover: view", r) - } - }() - db.View(func(tx *Tx) error { - assert.NotNil(t, tx.Bucket([]byte("widgets"))) - panic("omg") - }) + func() { + defer func() { + if r := recover(); r != nil { + t.Log("recover: view", r) + } }() - - // Verify that we can still use read transactions. - db.View(func(tx *Tx) error { - assert.NotNil(t, tx.Bucket([]byte("widgets"))) - return nil + db.View(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")) != nil, "") + panic("omg") }) + }() + + // Verify that we can still use read transactions. + db.View(func(tx *bolt.Tx) error { + assert(t, tx.Bucket([]byte("widgets")) != nil, "") + return nil }) } @@ -337,157 +350,85 @@ func TestDB_Commit_WriteFail(t *testing.T) { // Ensure that DB stats can be returned. func TestDB_Stats(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - stats := db.Stats() - assert.Equal(t, 2, stats.TxStats.PageCount, "PageCount") - assert.Equal(t, 0, stats.FreePageN, "FreePageN") - assert.Equal(t, 2, stats.PendingPageN, "PendingPageN") + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err }) -} - -// Ensure that the mmap grows appropriately. 
-func TestDB_mmapSize(t *testing.T) { - db := &DB{pageSize: 4096} - assert.Equal(t, db.mmapSize(0), minMmapSize) - assert.Equal(t, db.mmapSize(16384), minMmapSize) - assert.Equal(t, db.mmapSize(minMmapSize-1), minMmapSize) - assert.Equal(t, db.mmapSize(minMmapSize), minMmapSize) - assert.Equal(t, db.mmapSize(minMmapSize+1), (minMmapSize*2)+4096) - assert.Equal(t, db.mmapSize(10000000), 20000768) - assert.Equal(t, db.mmapSize((1<<30)-1), 2147483648) - assert.Equal(t, db.mmapSize(1<<30), 1<<31) + stats := db.Stats() + equals(t, 2, stats.TxStats.PageCount) + equals(t, 0, stats.FreePageN) + equals(t, 2, stats.PendingPageN) } // Ensure that database pages are in expected order and type. func TestDB_Consistency(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }) - for i := 0; i < 10; i++ { - db.Update(func(tx *Tx) error { - assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - return nil - }) - } - db.Update(func(tx *Tx) error { - if p, _ := tx.Page(0); assert.NotNil(t, p) { - assert.Equal(t, "meta", p.Type) - } - if p, _ := tx.Page(1); assert.NotNil(t, p) { - assert.Equal(t, "meta", p.Type) - } - if p, _ := tx.Page(2); assert.NotNil(t, p) { - assert.Equal(t, "free", p.Type) - } - if p, _ := tx.Page(3); assert.NotNil(t, p) { - assert.Equal(t, "free", p.Type) - } - if p, _ := tx.Page(4); assert.NotNil(t, p) { - assert.Equal(t, "leaf", p.Type) // root leaf - } - if p, _ := tx.Page(5); assert.NotNil(t, p) { - assert.Equal(t, "freelist", p.Type) - } - p, _ := tx.Page(6) - assert.Nil(t, p) + for i := 0; i < 10; i++ { + db.Update(func(tx *bolt.Tx) error { + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) return nil }) - }) -} + } + db.Update(func(tx *bolt.Tx) error { + p, _ := 
tx.Page(0) + assert(t, p != nil, "") + equals(t, "meta", p.Type) -// Ensure that a database can return a string representation of itself. -func TestDB_String(t *testing.T) { - db := &DB{path: "/foo/bar"} - assert.Equal(t, db.String(), `DB<"/foo/bar">`) - assert.Equal(t, db.GoString(), `bolt.DB{path:"/foo/bar"}`) + p, _ = tx.Page(1) + assert(t, p != nil, "") + equals(t, "meta", p.Type) + + p, _ = tx.Page(2) + assert(t, p != nil, "") + equals(t, "free", p.Type) + + p, _ = tx.Page(3) + assert(t, p != nil, "") + equals(t, "free", p.Type) + + p, _ = tx.Page(4) + assert(t, p != nil, "") + equals(t, "leaf", p.Type) + + p, _ = tx.Page(5) + assert(t, p != nil, "") + equals(t, "freelist", p.Type) + + p, _ = tx.Page(6) + assert(t, p == nil, "") + return nil + }) } // Ensure that DB stats can be substracted from one another. func TestDBStats_Sub(t *testing.T) { - var a, b Stats + var a, b bolt.Stats a.TxStats.PageCount = 3 a.FreePageN = 4 b.TxStats.PageCount = 10 b.FreePageN = 14 diff := b.Sub(&a) - assert.Equal(t, 7, diff.TxStats.PageCount) + equals(t, 7, diff.TxStats.PageCount) // free page stats are copied from the receiver and not subtracted - assert.Equal(t, 14, diff.FreePageN) -} - -// Ensure that meta with bad magic is invalid. -func TestMeta_validate_magic(t *testing.T) { - m := &meta{magic: 0x01234567} - assert.Equal(t, m.validate(), ErrInvalid) -} - -// Ensure that meta with a bad version is invalid. -func TestMeta_validate_version(t *testing.T) { - m := &meta{magic: magic, version: 200} - assert.Equal(t, m.validate(), ErrVersionMismatch) -} - -// Ensure that a DB in strict mode will fail when corrupted. -func TestDB_StrictMode(t *testing.T) { - var msg string - func() { - defer func() { - msg = fmt.Sprintf("%s", recover()) - }() - - withOpenDB(func(db *DB, path string) { - db.StrictMode = true - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("foo")) - - // Corrupt the DB by extending the high water mark. 
- tx.meta.pgid++ - - return nil - }) - }) - }() - - assert.Equal(t, "check fail: page 4: unreachable unfreed", msg) -} - -// Ensure that a double freeing a page will result in a panic. -func TestDB_DoubleFree(t *testing.T) { - var msg string - func() { - defer func() { - msg = fmt.Sprintf("%s", recover()) - }() - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("foo")) - - // Corrupt the DB by adding a page to the freelist. - db.freelist.free(0, tx.page(3)) - - return nil - }) - }) - }() - - assert.Equal(t, "assertion failed: page 3 already freed", msg) + equals(t, 14, diff.FreePageN) } func ExampleDB_Update() { // Open the database. - db, _ := Open(tempfile(), 0666, nil) + db, _ := bolt.Open(tempfile(), 0666, nil) defer os.Remove(db.Path()) defer db.Close() // Execute several commands within a write transaction. - err := db.Update(func(tx *Tx) error { + err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { return err @@ -500,7 +441,7 @@ func ExampleDB_Update() { // If our transactional block didn't return an error then our data is saved. if err == nil { - db.View(func(tx *Tx) error { + db.View(func(tx *bolt.Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) fmt.Printf("The value of 'foo' is: %s\n", value) return nil @@ -513,12 +454,12 @@ func ExampleDB_Update() { func ExampleDB_View() { // Open the database. - db, _ := Open(tempfile(), 0666, nil) + db, _ := bolt.Open(tempfile(), 0666, nil) defer os.Remove(db.Path()) defer db.Close() // Insert data into a bucket. - db.Update(func(tx *Tx) error { + db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("people")) b := tx.Bucket([]byte("people")) b.Put([]byte("john"), []byte("doe")) @@ -527,7 +468,7 @@ func ExampleDB_View() { }) // Access data from within a read-only transactional block. 
- db.View(func(tx *Tx) error { + db.View(func(tx *bolt.Tx) error { v := tx.Bucket([]byte("people")).Get([]byte("john")) fmt.Printf("John's last name is %s.\n", v) return nil @@ -539,12 +480,12 @@ func ExampleDB_View() { func ExampleDB_Begin_ReadOnly() { // Open the database. - db, _ := Open(tempfile(), 0666, nil) + db, _ := bolt.Open(tempfile(), 0666, nil) defer os.Remove(db.Path()) defer db.Close() // Create a bucket. - db.Update(func(tx *Tx) error { + db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err }) @@ -571,44 +512,54 @@ func ExampleDB_Begin_ReadOnly() { // zephyr likes purple } -// tempfile returns a temporary file path. -func tempfile() string { - f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) - return f.Name() +// TestDB represents a wrapper around a Bolt DB to handle temporary file +// creation and automatic cleanup on close. +type TestDB struct { + *bolt.DB } -// withTempPath executes a function with a database reference. -func withTempPath(fn func(string)) { - path := tempfile() - defer os.RemoveAll(path) - fn(path) +// NewTestDB returns a new instance of TestDB. +func NewTestDB() *TestDB { + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + panic("cannot open db: " + err.Error()) + } + return &TestDB{db} } -// withOpenDB executes a function with an already opened database. -func withOpenDB(fn func(*DB, string)) { - withTempPath(func(path string) { - db, err := Open(path, 0666, nil) - if err != nil { - panic("cannot open db: " + err.Error()) - } - defer db.Close() - fn(db, path) +// Close closes the database and deletes the underlying file. +func (db *TestDB) Close() { + // Log statistics. + if *statsFlag { + db.PrintStats() + } - // Log statistics. - if *statsFlag { - logStats(db) - } + // Check database consistency after every test. + db.MustCheck() - // Check database consistency after every test. - mustCheck(db) - }) + // Close database and remove file. 
+ defer os.Remove(db.Path()) + db.DB.Close() } -// mustCheck runs a consistency check on the database and panics if any errors are found. -func mustCheck(db *DB) { - db.View(func(tx *Tx) error { +// PrintStats prints the database stats +func (db *TestDB) PrintStats() { + var stats = db.Stats() + fmt.Printf("[db] %-20s %-20s %-20s\n", + fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc), + fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount), + fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref), + ) + fmt.Printf(" %-20s %-20s %-20s\n", + fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)), + fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)), + fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)), + ) +} + +// MustCheck runs a consistency check on the database and panics if any errors are found. +func (db *TestDB) MustCheck() { + db.View(func(tx *bolt.Tx) error { // Collect all the errors. var errors []error for err := range tx.Check() { @@ -640,8 +591,23 @@ func mustCheck(db *DB) { }) } +// CopyTempFile copies a database to a temporary file. +func (db *TestDB) CopyTempFile() { + path := tempfile() + db.View(func(tx *bolt.Tx) error { return tx.CopyFile(path, 0600) }) + fmt.Println("db copied to: ", path) +} + +// tempfile returns a temporary file path. +func tempfile() string { + f, _ := ioutil.TempFile("", "bolt-") + f.Close() + os.Remove(f.Name()) + return f.Name() +} + // mustContainKeys checks that a bucket contains a given set of keys. -func mustContainKeys(b *Bucket, m map[string]string) { +func mustContainKeys(b *bolt.Bucket, m map[string]string) { found := make(map[string]string) b.ForEach(func(k, _ []byte) error { found[string(k)] = "" @@ -679,29 +645,6 @@ func trunc(b []byte, length int) []byte { return b } -// writes the current database stats to the testing log. 
-func logStats(db *DB) { - var stats = db.Stats() - fmt.Printf("[db] %-20s %-20s %-20s\n", - fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc), - fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount), - fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref), - ) - fmt.Printf(" %-20s %-20s %-20s\n", - fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)), - fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)), - fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)), - ) -} - func truncDuration(d time.Duration) string { return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") } - -// copyAndFailNow copies a database to a new location and then fails then test. -func copyAndFailNow(t *testing.T, db *DB) { - path := tempfile() - db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) }) - fmt.Println("db copied to: ", path) - t.FailNow() -} diff --git a/freelist_test.go b/freelist_test.go index 24ce0f6..792ca92 100644 --- a/freelist_test.go +++ b/freelist_test.go @@ -1,24 +1,27 @@ package bolt import ( + "reflect" "testing" "unsafe" - - "github.com/stretchr/testify/assert" ) // Ensure that a page is added to a transaction's freelist. func TestFreelist_free(t *testing.T) { f := newFreelist() f.free(100, &page{id: 12}) - assert.Equal(t, f.pending[100], []pgid{12}) + if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { + t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) + } } // Ensure that a page and its overflow is added to a transaction's freelist. 
func TestFreelist_free_overflow(t *testing.T) { f := newFreelist() f.free(100, &page{id: 12, overflow: 3}) - assert.Equal(t, f.pending[100], []pgid{12, 13, 14, 15}) + if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) { + t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) + } } // Ensure that a transaction's free pages can be released. @@ -29,25 +32,56 @@ func TestFreelist_release(t *testing.T) { f.free(102, &page{id: 39}) f.release(100) f.release(101) - assert.Equal(t, []pgid{9, 12, 13}, f.ids) + if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } + f.release(102) - assert.Equal(t, []pgid{9, 12, 13, 39}, f.ids) + if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } } // Ensure that a freelist can find contiguous blocks of pages. func TestFreelist_allocate(t *testing.T) { f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} - assert.Equal(t, 3, int(f.allocate(3))) - assert.Equal(t, 6, int(f.allocate(1))) - assert.Equal(t, 0, int(f.allocate(3))) - assert.Equal(t, 12, int(f.allocate(2))) - assert.Equal(t, 7, int(f.allocate(1))) - assert.Equal(t, 0, int(f.allocate(0))) - assert.Equal(t, []pgid{9, 18}, f.ids) - assert.Equal(t, 9, int(f.allocate(1))) - assert.Equal(t, 18, int(f.allocate(1))) - assert.Equal(t, 0, int(f.allocate(1))) - assert.Equal(t, []pgid{}, f.ids) + if id := int(f.allocate(3)); id != 3 { + t.Fatalf("exp=3; got=%v", id) + } + if id := int(f.allocate(1)); id != 6 { + t.Fatalf("exp=6; got=%v", id) + } + if id := int(f.allocate(3)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.allocate(2)); id != 12 { + t.Fatalf("exp=12; got=%v", id) + } + if id := int(f.allocate(1)); id != 7 { + t.Fatalf("exp=7; got=%v", id) + } + if id := int(f.allocate(0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.allocate(0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := []pgid{9, 18}; 
!reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } + + if id := int(f.allocate(1)); id != 9 { + t.Fatalf("exp=9; got=%v", id) + } + if id := int(f.allocate(1)); id != 18 { + t.Fatalf("exp=18; got=%v", id) + } + if id := int(f.allocate(1)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } } // Ensure that a freelist can deserialize from a freelist page. @@ -68,9 +102,9 @@ func TestFreelist_read(t *testing.T) { f.read(page) // Ensure that there are two page ids in the freelist. - assert.Equal(t, len(f.ids), 2) - assert.Equal(t, f.ids[0], pgid(23)) - assert.Equal(t, f.ids[1], pgid(50)) + if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { + t.Fatalf("exp=%v; got=%v", exp, f.ids) + } } // Ensure that a freelist can serialize into a freelist page. @@ -89,10 +123,7 @@ func TestFreelist_write(t *testing.T) { // Ensure that the freelist is correct. // All pages should be present and in reverse order. - assert.Equal(t, len(f2.ids), 5) - assert.Equal(t, f2.ids[0], pgid(3)) - assert.Equal(t, f2.ids[1], pgid(11)) - assert.Equal(t, f2.ids[2], pgid(12)) - assert.Equal(t, f2.ids[3], pgid(28)) - assert.Equal(t, f2.ids[4], pgid(39)) + if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) { + t.Fatalf("exp=%v; got=%v", exp, f2.ids) + } } diff --git a/node.go b/node.go index 9dbc3f9..c204c39 100644 --- a/node.go +++ b/node.go @@ -337,7 +337,7 @@ func (n *node) spill() error { for _, node := range nodes { // Add node's page to the freelist if it's not new. if node.pgid > 0 { - tx.db.freelist.free(tx.id(), tx.page(node.pgid)) + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) node.pgid = 0 } @@ -565,7 +565,7 @@ func (n *node) dereference() { // free adds the node's underlying page to the freelist. 
func (n *node) free() { if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.id(), n.bucket.tx.page(n.pgid)) + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) n.pgid = 0 } } diff --git a/node_test.go b/node_test.go index b85e18f..fa5d10f 100644 --- a/node_test.go +++ b/node_test.go @@ -3,8 +3,6 @@ package bolt import ( "testing" "unsafe" - - "github.com/stretchr/testify/assert" ) // Ensure that a node can insert a key/value. @@ -14,14 +12,22 @@ func TestNode_put(t *testing.T) { n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) - assert.Equal(t, len(n.inodes), 3) - assert.Equal(t, n.inodes[0].key, []byte("bar")) - assert.Equal(t, n.inodes[0].value, []byte("1")) - assert.Equal(t, n.inodes[1].key, []byte("baz")) - assert.Equal(t, n.inodes[1].value, []byte("2")) - assert.Equal(t, n.inodes[2].key, []byte("foo")) - assert.Equal(t, n.inodes[2].value, []byte("3")) - assert.Equal(t, n.inodes[2].flags, uint32(leafPageFlag)) + + if len(n.inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(n.inodes)) + } + if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if n.inodes[2].flags != uint32(leafPageFlag) { + t.Fatalf("not a leaf: %d", n.inodes[2].flags) + } } // Ensure that a node can deserialize from a leaf page. @@ -47,12 +53,18 @@ func TestNode_read_LeafPage(t *testing.T) { n.read(page) // Check that there are two inodes with correct data. 
- assert.True(t, n.isLeaf) - assert.Equal(t, len(n.inodes), 2) - assert.Equal(t, n.inodes[0].key, []byte("bar")) - assert.Equal(t, n.inodes[0].value, []byte("fooz")) - assert.Equal(t, n.inodes[1].key, []byte("helloworld")) - assert.Equal(t, n.inodes[1].value, []byte("bye")) + if !n.isLeaf { + t.Fatal("expected leaf") + } + if len(n.inodes) != 2 { + t.Fatalf("exp=2; got=%d", len(n.inodes)) + } + if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } } // Ensure that a node can serialize into a leaf page. @@ -73,13 +85,18 @@ func TestNode_write_LeafPage(t *testing.T) { n2.read(p) // Check that the two pages are the same. - assert.Equal(t, len(n2.inodes), 3) - assert.Equal(t, n2.inodes[0].key, []byte("john")) - assert.Equal(t, n2.inodes[0].value, []byte("johnson")) - assert.Equal(t, n2.inodes[1].key, []byte("ricki")) - assert.Equal(t, n2.inodes[1].value, []byte("lake")) - assert.Equal(t, n2.inodes[2].key, []byte("susy")) - assert.Equal(t, n2.inodes[2].value, []byte("que")) + if len(n2.inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(n2.inodes)) + } + if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } + if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" { + t.Fatalf("exp=; got=<%s,%s>", k, v) + } } // Ensure that a node can split into appropriate subgroups. 
@@ -96,9 +113,15 @@ func TestNode_split(t *testing.T) { n.split(100) var parent = n.parent - assert.Equal(t, len(parent.children), 2) - assert.Equal(t, len(parent.children[0].inodes), 2) - assert.Equal(t, len(parent.children[1].inodes), 3) + if len(parent.children) != 2 { + t.Fatalf("exp=2; got=%d", len(parent.children)) + } + if len(parent.children[0].inodes) != 2 { + t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes)) + } + if len(parent.children[1].inodes) != 3 { + t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes)) + } } // Ensure that a page with the minimum number of inodes just returns a single node. @@ -110,7 +133,9 @@ func TestNode_split_MinKeys(t *testing.T) { // Split. n.split(20) - assert.Nil(t, n.parent) + if n.parent != nil { + t.Fatalf("expected nil parent") + } } // Ensure that a node that has keys that all fit on a page just returns one leaf. @@ -125,5 +150,7 @@ func TestNode_split_SinglePage(t *testing.T) { // Split. n.split(4096) - assert.Nil(t, n.parent) + if n.parent != nil { + t.Fatalf("expected nil parent") + } } diff --git a/page_test.go b/page_test.go index be90096..7a4d327 100644 --- a/page_test.go +++ b/page_test.go @@ -1,17 +1,26 @@ package bolt import ( - "github.com/stretchr/testify/assert" "testing" ) // Ensure that the page type can be returned in human readable format. 
func TestPage_typ(t *testing.T) { - assert.Equal(t, (&page{flags: branchPageFlag}).typ(), "branch") - assert.Equal(t, (&page{flags: leafPageFlag}).typ(), "leaf") - assert.Equal(t, (&page{flags: metaPageFlag}).typ(), "meta") - assert.Equal(t, (&page{flags: freelistPageFlag}).typ(), "freelist") - assert.Equal(t, (&page{flags: 20000}).typ(), "unknown<4e20>") + if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" { + t.Fatalf("exp=branch; got=%v", typ) + } + if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" { + t.Fatalf("exp=leaf; got=%v", typ) + } + if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" { + t.Fatalf("exp=meta; got=%v", typ) + } + if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" { + t.Fatalf("exp=freelist; got=%v", typ) + } + if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" { + t.Fatalf("exp=unknown<4e20>; got=%v", typ) + } } // Ensure that the hexdump debugging function doesn't blow up. diff --git a/quick_test.go b/quick_test.go index b083250..4da5817 100644 --- a/quick_test.go +++ b/quick_test.go @@ -1,9 +1,11 @@ -package bolt +package bolt_test import ( "bytes" "flag" + "fmt" "math/rand" + "os" "reflect" "testing/quick" "time" @@ -28,8 +30,8 @@ func init() { flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "") flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "") flag.Parse() - warn("seed:", qseed) - warnf("quick settings: count=%v, items=%v, ksize=%v, vsize=%v", qcount, qmaxitems, qmaxksize, qmaxvsize) + fmt.Fprintln(os.Stderr, "seed:", qseed) + fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize) } func qconfig() *quick.Config { diff --git a/simulation_test.go b/simulation_test.go index 482349f..ceb8bae 100644 --- a/simulation_test.go +++ b/simulation_test.go @@ -1,4 +1,4 @@ -package bolt +package bolt_test import ( "bytes" @@ -7,7 +7,7 @@ import ( "sync" "testing" - "github.com/stretchr/testify/assert" + 
"github.com/boltdb/bolt" ) func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) } @@ -39,86 +39,88 @@ func testSimulate(t *testing.T, threadCount, parallelism int) { var readerHandlers = []simulateHandler{simulateGetHandler} var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} - var versions = make(map[txid]*QuickDB) + var versions = make(map[int]*QuickDB) versions[1] = NewQuickDB() - withOpenDB(func(db *DB, path string) { - var mutex sync.Mutex - // Run n threads in parallel, each with their own operation. - var wg sync.WaitGroup - var threads = make(chan bool, parallelism) - var i int - for { - threads <- true - wg.Add(1) - writable := ((rand.Int() % 100) < 20) // 20% writers + db := NewTestDB() + defer db.Close() - // Choose an operation to execute. - var handler simulateHandler - if writable { - handler = writerHandlers[rand.Intn(len(writerHandlers))] - } else { - handler = readerHandlers[rand.Intn(len(readerHandlers))] - } + var mutex sync.Mutex - // Execute a thread for the given operation. - go func(writable bool, handler simulateHandler) { - defer wg.Done() + // Run n threads in parallel, each with their own operation. + var wg sync.WaitGroup + var threads = make(chan bool, parallelism) + var i int + for { + threads <- true + wg.Add(1) + writable := ((rand.Int() % 100) < 20) // 20% writers - // Start transaction. - tx, err := db.Begin(writable) - if err != nil { - t.Fatal("tx begin: ", err) - } - - // Obtain current state of the dataset. - mutex.Lock() - var qdb = versions[tx.id()] - if writable { - qdb = versions[tx.id()-1].Copy() - } - mutex.Unlock() - - // Make sure we commit/rollback the tx at the end and update the state. - if writable { - defer func() { - mutex.Lock() - versions[tx.id()] = qdb - mutex.Unlock() - - assert.NoError(t, tx.Commit()) - }() - } else { - defer tx.Rollback() - } - - // Ignore operation if we don't have data yet. - if qdb == nil { - return - } - - // Execute handler. 
- handler(tx, qdb) - - // Release a thread back to the scheduling loop. - <-threads - }(writable, handler) - - i++ - if i > threadCount { - break - } + // Choose an operation to execute. + var handler simulateHandler + if writable { + handler = writerHandlers[rand.Intn(len(writerHandlers))] + } else { + handler = readerHandlers[rand.Intn(len(readerHandlers))] } - // Wait until all threads are done. - wg.Wait() - }) + // Execute a thread for the given operation. + go func(writable bool, handler simulateHandler) { + defer wg.Done() + + // Start transaction. + tx, err := db.Begin(writable) + if err != nil { + t.Fatal("tx begin: ", err) + } + + // Obtain current state of the dataset. + mutex.Lock() + var qdb = versions[tx.ID()] + if writable { + qdb = versions[tx.ID()-1].Copy() + } + mutex.Unlock() + + // Make sure we commit/rollback the tx at the end and update the state. + if writable { + defer func() { + mutex.Lock() + versions[tx.ID()] = qdb + mutex.Unlock() + + ok(t, tx.Commit()) + }() + } else { + defer tx.Rollback() + } + + // Ignore operation if we don't have data yet. + if qdb == nil { + return + } + + // Execute handler. + handler(tx, qdb) + + // Release a thread back to the scheduling loop. + <-threads + }(writable, handler) + + i++ + if i > threadCount { + break + } + } + + // Wait until all threads are done. + wg.Wait() } -type simulateHandler func(tx *Tx, qdb *QuickDB) +type simulateHandler func(tx *bolt.Tx, qdb *QuickDB) // Retrieves a key from the database and verifies that it is what is expected. -func simulateGetHandler(tx *Tx, qdb *QuickDB) { +func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) { // Randomly retrieve an existing exist. keys := qdb.Rand() if len(keys) == 0 { @@ -153,7 +155,7 @@ func simulateGetHandler(tx *Tx, qdb *QuickDB) { } // Inserts a key into the database. 
-func simulatePutHandler(tx *Tx, qdb *QuickDB) { +func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) { var err error keys, value := randKeys(), randValue() diff --git a/tx.go b/tx.go index bc2842f..62b9be6 100644 --- a/tx.go +++ b/tx.go @@ -52,9 +52,9 @@ func (tx *Tx) init(db *DB) { } } -// id returns the transaction id. -func (tx *Tx) id() txid { - return tx.meta.txid +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) } // DB returns a reference to the database that created the transaction. @@ -158,7 +158,7 @@ func (tx *Tx) Commit() error { // Free the freelist and allocate new pages for it. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). - tx.db.freelist.free(tx.id(), tx.db.page(tx.meta.freelist)) + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) if err != nil { tx.rollback() @@ -218,7 +218,7 @@ func (tx *Tx) rollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.id()) + tx.db.freelist.rollback(tx.meta.txid) tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) } tx.close() diff --git a/tx_test.go b/tx_test.go index 0528c0d..39f50c4 100644 --- a/tx_test.go +++ b/tx_test.go @@ -1,4 +1,4 @@ -package bolt +package bolt_test import ( "errors" @@ -6,310 +6,302 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" + "github.com/boltdb/bolt" ) // Ensure that committing a closed transaction returns an error. func TestTx_Commit_Closed(t *testing.T) { - withOpenDB(func(db *DB, path string) { - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("foo")) - assert.NoError(t, tx.Commit()) - assert.Equal(t, tx.Commit(), ErrTxClosed) - }) + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.CreateBucket([]byte("foo")) + ok(t, tx.Commit()) + equals(t, tx.Commit(), bolt.ErrTxClosed) } // Ensure that rolling back a closed transaction returns an error. 
func TestTx_Rollback_Closed(t *testing.T) { - withOpenDB(func(db *DB, path string) { - tx, _ := db.Begin(true) - assert.NoError(t, tx.Rollback()) - assert.Equal(t, tx.Rollback(), ErrTxClosed) - }) + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + ok(t, tx.Rollback()) + equals(t, tx.Rollback(), bolt.ErrTxClosed) } // Ensure that committing a read-only transaction returns an error. func TestTx_Commit_ReadOnly(t *testing.T) { - withOpenDB(func(db *DB, path string) { - tx, _ := db.Begin(false) - assert.Equal(t, tx.Commit(), ErrTxNotWritable) - }) + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(false) + equals(t, tx.Commit(), bolt.ErrTxNotWritable) } // Ensure that a transaction can retrieve a cursor on the root bucket. func TestTx_Cursor(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.CreateBucket([]byte("woojits")) - c := tx.Cursor() + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.CreateBucket([]byte("woojits")) + c := tx.Cursor() - k, v := c.First() - assert.Equal(t, "widgets", string(k)) - assert.Nil(t, v) + k, v := c.First() + equals(t, "widgets", string(k)) + assert(t, v == nil, "") - k, v = c.Next() - assert.Equal(t, "woojits", string(k)) - assert.Nil(t, v) + k, v = c.Next() + equals(t, "woojits", string(k)) + assert(t, v == nil, "") - k, v = c.Next() - assert.Nil(t, k) - assert.Nil(t, v) + k, v = c.Next() + assert(t, k == nil, "") + assert(t, v == nil, "") - return nil - }) + return nil }) } // Ensure that creating a bucket with a read-only transaction returns an error. 
func TestTx_CreateBucket_ReadOnly(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.View(func(tx *Tx) error { - b, err := tx.CreateBucket([]byte("foo")) - assert.Nil(t, b) - assert.Equal(t, ErrTxNotWritable, err) - return nil - }) + db := NewTestDB() + defer db.Close() + db.View(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("foo")) + assert(t, b == nil, "") + equals(t, bolt.ErrTxNotWritable, err) + return nil }) } // Ensure that creating a bucket on a closed transaction returns an error. func TestTx_CreateBucket_Closed(t *testing.T) { - withOpenDB(func(db *DB, path string) { - tx, _ := db.Begin(true) - tx.Commit() - b, err := tx.CreateBucket([]byte("foo")) - assert.Nil(t, b) - assert.Equal(t, ErrTxClosed, err) - }) + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.Commit() + b, err := tx.CreateBucket([]byte("foo")) + assert(t, b == nil, "") + equals(t, bolt.ErrTxClosed, err) } // Ensure that a Tx can retrieve a bucket. func TestTx_Bucket(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - assert.NotNil(t, b) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + b := tx.Bucket([]byte("widgets")) + assert(t, b != nil, "") + return nil }) } // Ensure that a Tx retrieving a non-existent key returns nil. 
func TestTx_Get_Missing(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key")) - assert.Nil(t, value) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key")) + assert(t, value == nil, "") + return nil }) } // Ensure that a bucket can be created and retrieved. func TestTx_CreateBucket(t *testing.T) { - withOpenDB(func(db *DB, path string) { - // Create a bucket. - db.Update(func(tx *Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert.NotNil(t, b) - assert.NoError(t, err) - return nil - }) + db := NewTestDB() + defer db.Close() - // Read the bucket through a separate transaction. - db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - assert.NotNil(t, b) - return nil - }) + // Create a bucket. + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + assert(t, b != nil, "") + ok(t, err) + return nil + }) + + // Read the bucket through a separate transaction. + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + assert(t, b != nil, "") + return nil }) } // Ensure that a bucket can be created if it doesn't already exist. 
func TestTx_CreateBucketIfNotExists(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - assert.NotNil(t, b) - assert.NoError(t, err) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("widgets")) + assert(t, b != nil, "") + ok(t, err) - b, err = tx.CreateBucketIfNotExists([]byte("widgets")) - assert.NotNil(t, b) - assert.NoError(t, err) + b, err = tx.CreateBucketIfNotExists([]byte("widgets")) + assert(t, b != nil, "") + ok(t, err) - b, err = tx.CreateBucketIfNotExists([]byte{}) - assert.Nil(t, b) - assert.Equal(t, ErrBucketNameRequired, err) + b, err = tx.CreateBucketIfNotExists([]byte{}) + assert(t, b == nil, "") + equals(t, bolt.ErrBucketNameRequired, err) - b, err = tx.CreateBucketIfNotExists(nil) - assert.Nil(t, b) - assert.Equal(t, ErrBucketNameRequired, err) - return nil - }) + b, err = tx.CreateBucketIfNotExists(nil) + assert(t, b == nil, "") + equals(t, bolt.ErrBucketNameRequired, err) + return nil + }) - // Read the bucket through a separate transaction. - db.View(func(tx *Tx) error { - b := tx.Bucket([]byte("widgets")) - assert.NotNil(t, b) - return nil - }) + // Read the bucket through a separate transaction. + db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + assert(t, b != nil, "") + return nil }) } // Ensure that a bucket cannot be created twice. func TestTx_CreateBucket_Exists(t *testing.T) { - withOpenDB(func(db *DB, path string) { - // Create a bucket. - db.Update(func(tx *Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert.NotNil(t, b) - assert.NoError(t, err) - return nil - }) + db := NewTestDB() + defer db.Close() + // Create a bucket. + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + assert(t, b != nil, "") + ok(t, err) + return nil + }) - // Create the same bucket again. 
- db.Update(func(tx *Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert.Nil(t, b) - assert.Equal(t, ErrBucketExists, err) - return nil - }) + // Create the same bucket again. + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + assert(t, b == nil, "") + equals(t, bolt.ErrBucketExists, err) + return nil }) } // Ensure that a bucket is created with a non-blank name. func TestTx_CreateBucket_NameRequired(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - b, err := tx.CreateBucket(nil) - assert.Nil(t, b) - assert.Equal(t, ErrBucketNameRequired, err) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket(nil) + assert(t, b == nil, "") + equals(t, bolt.ErrBucketNameRequired, err) + return nil }) } // Ensure that a bucket can be deleted. func TestTx_DeleteBucket(t *testing.T) { - withOpenDB(func(db *DB, path string) { - // Create a bucket and add a value. - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - return nil - }) + db := NewTestDB() + defer db.Close() - // Save root page id. - var root pgid - db.View(func(tx *Tx) error { - root = tx.Bucket([]byte("widgets")).root - return nil - }) + // Create a bucket and add a value. + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + return nil + }) - // Delete the bucket and make sure we can't get the value. - db.Update(func(tx *Tx) error { - assert.NoError(t, tx.DeleteBucket([]byte("widgets"))) - assert.Nil(t, tx.Bucket([]byte("widgets"))) - return nil - }) + // Delete the bucket and make sure we can't get the value. 
+ db.Update(func(tx *bolt.Tx) error { + ok(t, tx.DeleteBucket([]byte("widgets"))) + assert(t, tx.Bucket([]byte("widgets")) == nil, "") + return nil + }) - db.Update(func(tx *Tx) error { - // Verify that the bucket's page is free. - assert.Equal(t, []pgid{4, 5}, db.freelist.all()) - - // Create the bucket again and make sure there's not a phantom value. - b, err := tx.CreateBucket([]byte("widgets")) - assert.NotNil(t, b) - assert.NoError(t, err) - assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo"))) - return nil - }) + db.Update(func(tx *bolt.Tx) error { + // Create the bucket again and make sure there's not a phantom value. + b, err := tx.CreateBucket([]byte("widgets")) + assert(t, b != nil, "") + ok(t, err) + assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") + return nil }) } // Ensure that deleting a bucket on a closed transaction returns an error. func TestTx_DeleteBucket_Closed(t *testing.T) { - withOpenDB(func(db *DB, path string) { - tx, _ := db.Begin(true) - tx.Commit() - assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxClosed) - }) + db := NewTestDB() + defer db.Close() + tx, _ := db.Begin(true) + tx.Commit() + equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed) } // Ensure that deleting a bucket with a read-only transaction returns an error. func TestTx_DeleteBucket_ReadOnly(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.View(func(tx *Tx) error { - assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxNotWritable) - return nil - }) + db := NewTestDB() + defer db.Close() + db.View(func(tx *bolt.Tx) error { + equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable) + return nil }) } // Ensure that nothing happens when deleting a bucket that doesn't exist. 
func TestTx_DeleteBucket_NotFound(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket([]byte("widgets"))) - return nil - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets"))) + return nil }) } // Ensure that Tx commit handlers are called after a transaction successfully commits. func TestTx_OnCommit(t *testing.T) { var x int - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.OnCommit(func() { x += 1 }) + tx.OnCommit(func() { x += 2 }) + _, err := tx.CreateBucket([]byte("widgets")) + return err }) - assert.Equal(t, 3, x) + equals(t, 3, x) } // Ensure that Tx commit handlers are NOT called after a transaction rolls back. func TestTx_OnCommit_Rollback(t *testing.T) { var x int - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - tx.CreateBucket([]byte("widgets")) - return errors.New("rollback this commit") - }) + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.OnCommit(func() { x += 1 }) + tx.OnCommit(func() { x += 2 }) + tx.CreateBucket([]byte("widgets")) + return errors.New("rollback this commit") }) - assert.Equal(t, 0, x) + equals(t, 0, x) } // Ensure that the database can be copied to a file path. 
func TestTx_CopyFile(t *testing.T) { - withOpenDB(func(db *DB, path string) { - var dest = tempfile() - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) + db := NewTestDB() + defer db.Close() + var dest = tempfile() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) + return nil + }) - assert.NoError(t, db.View(func(tx *Tx) error { return tx.CopyFile(dest, 0600) })) + ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) })) - db2, err := Open(dest, 0600, nil) - assert.NoError(t, err) - defer db2.Close() + db2, err := bolt.Open(dest, 0600, nil) + ok(t, err) + defer db2.Close() - db2.View(func(tx *Tx) error { - assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo"))) - assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) - return nil - }) + db2.View(func(tx *bolt.Tx) error { + equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo"))) + equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) + return nil }) } @@ -336,48 +328,48 @@ func (f *failWriter) Write(p []byte) (n int, err error) { // Ensure that Copy handles write errors right. 
func TestTx_CopyFile_Error_Meta(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{}) }) - assert.EqualError(t, err, "meta copy: error injected for tests") + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) + return nil }) + + err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) }) + equals(t, err.Error(), "meta copy: error injected for tests") } // Ensure that Copy handles write errors right. func TestTx_CopyFile_Error_Normal(t *testing.T) { - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) - return nil - }) - - err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{3 * db.pageSize}) }) - assert.EqualError(t, err, "error injected for tests") + db := NewTestDB() + defer db.Close() + db.Update(func(tx *bolt.Tx) error { + tx.CreateBucket([]byte("widgets")) + tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) + return nil }) + + err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) }) + equals(t, err.Error(), "error injected for tests") } func ExampleTx_Rollback() { // Open the database. - db, _ := Open(tempfile(), 0666, nil) + db, _ := bolt.Open(tempfile(), 0666, nil) defer os.Remove(db.Path()) defer db.Close() // Create a bucket. 
- db.Update(func(tx *Tx) error { + db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err }) // Set a value for a key. - db.Update(func(tx *Tx) error { + db.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) }) @@ -388,7 +380,7 @@ func ExampleTx_Rollback() { tx.Rollback() // Ensure that our original value is still set. - db.View(func(tx *Tx) error { + db.View(func(tx *bolt.Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) fmt.Printf("The value for 'foo' is still: %s\n", value) return nil @@ -400,12 +392,12 @@ func ExampleTx_Rollback() { func ExampleTx_CopyFile() { // Open the database. - db, _ := Open(tempfile(), 0666, nil) + db, _ := bolt.Open(tempfile(), 0666, nil) defer os.Remove(db.Path()) defer db.Close() // Create a bucket and a key. - db.Update(func(tx *Tx) error { + db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) return nil @@ -413,15 +405,15 @@ func ExampleTx_CopyFile() { // Copy the database to another file. toFile := tempfile() - db.View(func(tx *Tx) error { return tx.CopyFile(toFile, 0666) }) + db.View(func(tx *bolt.Tx) error { return tx.CopyFile(toFile, 0666) }) defer os.Remove(toFile) // Open the cloned database. - db2, _ := Open(toFile, 0666, nil) + db2, _ := bolt.Open(toFile, 0666, nil) defer db2.Close() // Ensure that the key exists in the copy. - db2.View(func(tx *Tx) error { + db2.View(func(tx *bolt.Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) fmt.Printf("The value for 'foo' in the clone is: %s\n", value) return nil