diff --git a/bolt_test.go b/bolt_test.go new file mode 100644 index 0000000..b7bea1f --- /dev/null +++ b/bolt_test.go @@ -0,0 +1,36 @@ +package bolt_test + +import ( + "fmt" + "path/filepath" + "reflect" + "runtime" + "testing" +) + +// assert fails the test if the condition is false. +func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { + if !condition { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) + tb.FailNow() + } +} + +// ok fails the test if an err is not nil. +func ok(tb testing.TB, err error) { + if err != nil { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) + tb.FailNow() + } +} + +// equals fails the test if exp is not equal to act. +func equals(tb testing.TB, exp, act interface{}) { + if !reflect.DeepEqual(exp, act) { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + tb.FailNow() + } +} diff --git a/bucket_test.go b/bucket_test.go index 6923b6c..90e704a 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -13,7 +13,6 @@ import ( "testing/quick" "github.com/boltdb/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a bucket that gets a non-existent key returns nil. @@ -23,7 +22,7 @@ func TestBucket_Get_NonExistent(t *testing.T) { db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert.Nil(t, value) + assert(t, value == nil, "") return nil }) } @@ -37,7 +36,7 @@ func TestBucket_Get_FromNode(t *testing.T) { b := tx.Bucket([]byte("widgets")) b.Put([]byte("foo"), []byte("bar")) value := b.Get([]byte("foo")) - assert.Equal(t, value, []byte("bar")) + equals(t, []byte("bar"), value) return nil }) } @@ -49,8 +48,8 @@ func TestBucket_Get_IncompatibleValue(t *testing.T) { db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.NoError(t, err) - assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo"))) + ok(t, err) + assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") return nil }) } @@ -62,9 +61,9 @@ func TestBucket_Put(t *testing.T) { db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - assert.NoError(t, err) + ok(t, err) value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert.Equal(t, value, []byte("bar")) + equals(t, value, []byte("bar")) return nil }) } @@ -76,10 +75,10 @@ func TestBucket_Put_Repeat(t *testing.T) { db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) b := tx.Bucket([]byte("widgets")) - assert.NoError(t, b.Put([]byte("foo"), []byte("bar"))) - assert.NoError(t, b.Put([]byte("foo"), []byte("baz"))) + ok(t, b.Put([]byte("foo"), []byte("bar"))) + ok(t, b.Put([]byte("foo"), []byte("baz"))) value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert.Equal(t, value, []byte("baz")) + equals(t, value, []byte("baz")) return nil }) } @@ -94,7 +93,7 @@ func TestBucket_Put_Large(t *testing.T) { tx.CreateBucket([]byte("widgets")) b := tx.Bucket([]byte("widgets")) for i := 1; i < count; i++ { - assert.NoError(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor)))) + ok(t, b.Put([]byte(strings.Repeat("0", 
i*factor)), []byte(strings.Repeat("X", (count-i)*factor)))) } return nil }) @@ -102,7 +101,7 @@ func TestBucket_Put_Large(t *testing.T) { b := tx.Bucket([]byte("widgets")) for i := 1; i < count; i++ { value := b.Get([]byte(strings.Repeat("0", i*factor))) - assert.Equal(t, []byte(strings.Repeat("X", (count-i)*factor)), value) + equals(t, []byte(strings.Repeat("X", (count-i)*factor)), value) } return nil }) @@ -126,11 +125,11 @@ func TestDB_Put_VeryLarge(t *testing.T) { for j := 0; j < batchN; j++ { k, v := make([]byte, ksize), make([]byte, vsize) binary.BigEndian.PutUint32(k, uint32(i+j)) - assert.NoError(t, b.Put(k, v)) + ok(t, b.Put(k, v)) } return nil }) - assert.NoError(t, err) + ok(t, err) } } @@ -141,8 +140,8 @@ func TestBucket_Put_IncompatibleValue(t *testing.T) { db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.NoError(t, err) - assert.Equal(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + ok(t, err) + equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) return nil }) } @@ -155,7 +154,7 @@ func TestBucket_Put_Closed(t *testing.T) { tx.CreateBucket([]byte("widgets")) b := tx.Bucket([]byte("widgets")) tx.Rollback() - assert.Equal(t, bolt.ErrTxClosed, b.Put([]byte("foo"), []byte("bar"))) + equals(t, bolt.ErrTxClosed, b.Put([]byte("foo"), []byte("bar"))) } // Ensure that setting a value on a read-only bucket returns an error. @@ -164,13 +163,13 @@ func TestBucket_Put_ReadOnly(t *testing.T) { defer db.Close() db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) + ok(t, err) return nil }) db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) err := b.Put([]byte("foo"), []byte("bar")) - assert.Equal(t, err, bolt.ErrTxNotWritable) + equals(t, err, bolt.ErrTxNotWritable) return nil }) } @@ -183,9 +182,9 @@ func TestBucket_Delete(t *testing.T) { tx.CreateBucket([]byte("widgets")) tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")) - assert.NoError(t, err) + ok(t, err) value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert.Nil(t, value) + assert(t, value == nil, "") return nil }) } @@ -197,21 +196,21 @@ func TestBucket_Delete_Large(t *testing.T) { db.Update(func(tx *bolt.Tx) error { var b, _ = tx.CreateBucket([]byte("widgets")) for i := 0; i < 100; i++ { - assert.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) + ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) } return nil }) db.Update(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) for i := 0; i < 100; i++ { - assert.NoError(t, b.Delete([]byte(strconv.Itoa(i)))) + ok(t, b.Delete([]byte(strconv.Itoa(i)))) } return nil }) db.View(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) for i := 0; i < 100; i++ { - assert.Nil(t, b.Get([]byte(strconv.Itoa(i)))) + assert(t, b.Get([]byte(strconv.Itoa(i))) == nil, "") } return nil }) @@ -260,7 +259,7 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) { }) // Check that a freelist overflow occurred. - assert.NoError(t, err) + ok(t, err) } // Ensure that accessing and updating nested buckets is ok across transactions. @@ -270,14 +269,14 @@ func TestBucket_Nested(t *testing.T) { db.Update(func(tx *bolt.Tx) error { // Create a widgets bucket. 
b, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) + ok(t, err) // Create a widgets/foo bucket. _, err = b.CreateBucket([]byte("foo")) - assert.NoError(t, err) + ok(t, err) // Create a widgets/bar key. - assert.NoError(t, b.Put([]byte("bar"), []byte("0000"))) + ok(t, b.Put([]byte("bar"), []byte("0000"))) return nil }) @@ -286,7 +285,7 @@ func TestBucket_Nested(t *testing.T) { // Update widgets/bar. db.Update(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) - assert.NoError(t, b.Put([]byte("bar"), []byte("xxxx"))) + ok(t, b.Put([]byte("bar"), []byte("xxxx"))) return nil }) db.MustCheck() @@ -295,7 +294,7 @@ func TestBucket_Nested(t *testing.T) { db.Update(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) for i := 0; i < 10000; i++ { - assert.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) + ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) } return nil }) @@ -304,7 +303,7 @@ func TestBucket_Nested(t *testing.T) { // Insert into widgets/foo/baz. db.Update(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) - assert.NoError(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) + ok(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) return nil }) db.MustCheck() @@ -312,10 +311,10 @@ func TestBucket_Nested(t *testing.T) { // Verify. db.View(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) - assert.Equal(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz"))) - assert.Equal(t, []byte("xxxx"), b.Get([]byte("bar"))) + equals(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz"))) + equals(t, []byte("xxxx"), b.Get([]byte("bar"))) for i := 0; i < 10000; i++ { - assert.Equal(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i)))) + equals(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i)))) } return nil }) @@ -329,8 +328,8 @@ func TestBucket_Delete_Bucket(t *testing.T) { tx.CreateBucket([]byte("widgets")) b := tx.Bucket([]byte("widgets")) _, err := b.CreateBucket([]byte("foo")) - assert.NoError(t, err) - assert.Equal(t, bolt.ErrIncompatibleValue, b.Delete([]byte("foo"))) + ok(t, err) + equals(t, bolt.ErrIncompatibleValue, b.Delete([]byte("foo"))) return nil }) } @@ -346,7 +345,7 @@ func TestBucket_Delete_ReadOnly(t *testing.T) { db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) err := b.Delete([]byte("foo")) - assert.Equal(t, err, bolt.ErrTxNotWritable) + equals(t, err, bolt.ErrTxNotWritable) return nil }) } @@ -359,7 +358,7 @@ func TestBucket_Delete_Closed(t *testing.T) { tx.CreateBucket([]byte("widgets")) b := tx.Bucket([]byte("widgets")) tx.Rollback() - assert.Equal(t, bolt.ErrTxClosed, b.Delete([]byte("foo"))) + equals(t, bolt.ErrTxClosed, b.Delete([]byte("foo"))) } // Ensure that deleting a bucket causes nested buckets to be deleted. 
@@ -369,11 +368,11 @@ func TestBucket_DeleteBucket_Nested(t *testing.T) { db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.NoError(t, err) + ok(t, err) _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) - assert.NoError(t, err) - assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) - assert.NoError(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) + ok(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) return nil }) } @@ -385,22 +384,22 @@ func TestBucket_DeleteBucket_Nested2(t *testing.T) { db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.NoError(t, err) + ok(t, err) _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) - assert.NoError(t, err) - assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) return nil }) db.Update(func(tx *bolt.Tx) error { - assert.NotNil(t, tx.Bucket([]byte("widgets"))) - assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))) - assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar"))) - assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz"))) - assert.NoError(t, tx.DeleteBucket([]byte("widgets"))) + assert(t, tx.Bucket([]byte("widgets")) != nil, "") + assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) != nil, "") + assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")) != nil, "") + equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz"))) + ok(t, tx.DeleteBucket([]byte("widgets"))) return nil }) db.View(func(tx *bolt.Tx) error { - assert.Nil(t, tx.Bucket([]byte("widgets"))) + assert(t, tx.Bucket([]byte("widgets")) == nil, "") return nil }) } @@ -411,17 +410,17 @@ func TestBucket_DeleteBucket_Large(t *testing.T) { defer db.Close() db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) + ok(t, err) _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.NoError(t, err) + ok(t, err) b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) for i := 0; i < 1000; i++ { - assert.NoError(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)))) + ok(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)))) } return nil }) db.Update(func(tx *bolt.Tx) error { - assert.NoError(t, tx.DeleteBucket([]byte("widgets"))) + ok(t, tx.DeleteBucket([]byte("widgets"))) return nil }) @@ -434,8 +433,8 @@ func TestBucket_Bucket_IncompatibleValue(t *testing.T) { defer db.Close() db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) - assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - assert.Nil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))) + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + assert(t, 
tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) == nil, "") return nil }) } @@ -446,10 +445,10 @@ func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { defer db.Close() db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) - assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - assert.Equal(t, bolt.ErrIncompatibleValue, err) + equals(t, bolt.ErrIncompatibleValue, err) return nil }) } @@ -460,9 +459,9 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { defer db.Close() db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) - assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - assert.Equal(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) + ok(t, err) + ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) return nil }) } @@ -477,16 +476,16 @@ func TestBucket_NextSequence(t *testing.T) { // Make sure sequence increments. seq, err := tx.Bucket([]byte("widgets")).NextSequence() - assert.NoError(t, err) - assert.Equal(t, seq, uint64(1)) + ok(t, err) + equals(t, seq, uint64(1)) seq, err = tx.Bucket([]byte("widgets")).NextSequence() - assert.NoError(t, err) - assert.Equal(t, seq, uint64(2)) + ok(t, err) + equals(t, seq, uint64(2)) // Buckets should be separate. seq, err = tx.Bucket([]byte("woojits")).NextSequence() - assert.NoError(t, err) - assert.Equal(t, seq, uint64(1)) + ok(t, err) + equals(t, seq, uint64(1)) return nil }) } @@ -502,8 +501,8 @@ func TestBucket_NextSequence_ReadOnly(t *testing.T) { db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) i, err := b.NextSequence() - assert.Equal(t, i, uint64(0)) - assert.Equal(t, err, bolt.ErrTxNotWritable) + equals(t, i, uint64(0)) + equals(t, err, bolt.ErrTxNotWritable) return nil }) } @@ -517,7 +516,7 @@ func TestBucket_NextSequence_Closed(t *testing.T) { b := tx.Bucket([]byte("widgets")) tx.Rollback() _, err := b.NextSequence() - assert.Equal(t, bolt.ErrTxClosed, err) + equals(t, bolt.ErrTxClosed, err) } // Ensure a user can loop over all key/value pairs in a bucket. 
@@ -534,20 +533,20 @@ func TestBucket_ForEach(t *testing.T) { err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { switch index { case 0: - assert.Equal(t, k, []byte("bar")) - assert.Equal(t, v, []byte("0002")) + equals(t, k, []byte("bar")) + equals(t, v, []byte("0002")) case 1: - assert.Equal(t, k, []byte("baz")) - assert.Equal(t, v, []byte("0001")) + equals(t, k, []byte("baz")) + equals(t, v, []byte("0001")) case 2: - assert.Equal(t, k, []byte("foo")) - assert.Equal(t, v, []byte("0000")) + equals(t, k, []byte("foo")) + equals(t, v, []byte("0000")) } index++ return nil }) - assert.NoError(t, err) - assert.Equal(t, index, 3) + ok(t, err) + equals(t, index, 3) return nil }) } @@ -570,8 +569,8 @@ func TestBucket_ForEach_ShortCircuit(t *testing.T) { } return nil }) - assert.Equal(t, errors.New("marker"), err) - assert.Equal(t, 2, index) + equals(t, errors.New("marker"), err) + equals(t, 2, index) return nil }) } @@ -585,7 +584,7 @@ func TestBucket_ForEach_Closed(t *testing.T) { b := tx.Bucket([]byte("widgets")) tx.Rollback() err := b.ForEach(func(k, v []byte) error { return nil }) - assert.Equal(t, bolt.ErrTxClosed, err) + equals(t, bolt.ErrTxClosed, err) } // Ensure that an error is returned when inserting with an empty key. @@ -595,9 +594,9 @@ func TestBucket_Put_EmptyKey(t *testing.T) { db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar")) - assert.Equal(t, err, bolt.ErrKeyRequired) + equals(t, err, bolt.ErrKeyRequired) err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar")) - assert.Equal(t, err, bolt.ErrKeyRequired) + equals(t, err, bolt.ErrKeyRequired) return nil }) } @@ -609,7 +608,7 @@ func TestBucket_Put_KeyTooLarge(t *testing.T) { db.Update(func(tx *bolt.Tx) error { tx.CreateBucket([]byte("widgets")) err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar")) - assert.Equal(t, err, bolt.ErrKeyTooLarge) + equals(t, err, bolt.ErrKeyTooLarge) return nil }) } @@ -636,33 +635,33 @@ func TestBucket_Stats(t *testing.T) { db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("woojits")) stats := b.Stats() - assert.Equal(t, 1, stats.BranchPageN, "BranchPageN") - assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 7, stats.LeafPageN, "LeafPageN") - assert.Equal(t, 2, stats.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 501, stats.KeyN, "KeyN") - assert.Equal(t, 2, stats.Depth, "Depth") + equals(t, 1, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 7, stats.LeafPageN) + equals(t, 2, stats.LeafOverflowN) + equals(t, 501, stats.KeyN) + equals(t, 2, stats.Depth) branchInuse := 16 // branch page header branchInuse += 7 * 16 // branch elements branchInuse += 7 * 3 // branch keys (6 3-byte keys) - assert.Equal(t, branchInuse, stats.BranchInuse, "BranchInuse") + equals(t, branchInuse, stats.BranchInuse) leafInuse := 7 * 16 // leaf page header leafInuse += 501 * 16 // leaf elements leafInuse += 500*3 + len(big_key) // leaf keys leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values - assert.Equal(t, leafInuse, stats.LeafInuse, "LeafInuse") + equals(t, leafInuse, stats.LeafInuse) if os.Getpagesize() == 4096 { // Incompatible page size - assert.Equal(t, 4096, stats.BranchAlloc, "BranchAlloc") - assert.Equal(t, 36864, stats.LeafAlloc, "LeafAlloc") + equals(t, 4096, stats.BranchAlloc) + equals(t, 36864, stats.LeafAlloc) } - assert.Equal(t, 1, stats.BucketN, "BucketN") - assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN") - 
assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse") + equals(t, 1, stats.BucketN) + equals(t, 0, stats.InlineBucketN) + equals(t, 0, stats.InlineBucketInuse) return nil }) } @@ -698,17 +697,17 @@ func TestBucket_Stats_RandomFill(t *testing.T) { db.View(func(tx *bolt.Tx) error { s := tx.Bucket([]byte("woojits")).Stats() - assert.Equal(t, 100000, s.KeyN, "KeyN") + equals(t, 100000, s.KeyN) - assert.Equal(t, 98, s.BranchPageN, "BranchPageN") - assert.Equal(t, 0, s.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 130984, s.BranchInuse, "BranchInuse") - assert.Equal(t, 401408, s.BranchAlloc, "BranchAlloc") + equals(t, 98, s.BranchPageN) + equals(t, 0, s.BranchOverflowN) + equals(t, 130984, s.BranchInuse) + equals(t, 401408, s.BranchAlloc) - assert.Equal(t, 3412, s.LeafPageN, "LeafPageN") - assert.Equal(t, 0, s.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 4742482, s.LeafInuse, "LeafInuse") - assert.Equal(t, 13975552, s.LeafAlloc, "LeafAlloc") + equals(t, 3412, s.LeafPageN) + equals(t, 0, s.LeafOverflowN) + equals(t, 4742482, s.LeafInuse) + equals(t, 13975552, s.LeafAlloc) return nil }) } @@ -720,7 +719,7 @@ func TestBucket_Stats_Small(t *testing.T) { db.Update(func(tx *bolt.Tx) error { // Add a bucket that fits on a single root leaf. b, err := tx.CreateBucket([]byte("whozawhats")) - assert.NoError(t, err) + ok(t, err) b.Put([]byte("foo"), []byte("bar")) return nil @@ -729,22 +728,22 @@ func TestBucket_Stats_Small(t *testing.T) { db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("whozawhats")) stats := b.Stats() - assert.Equal(t, 0, stats.BranchPageN, "BranchPageN") - assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 0, stats.LeafPageN, "LeafPageN") - assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 1, stats.KeyN, "KeyN") - assert.Equal(t, 1, stats.Depth, "Depth") - assert.Equal(t, 0, stats.BranchInuse, "BranchInuse") - assert.Equal(t, 0, stats.LeafInuse, "LeafInuse") + equals(t, 0, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 0, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 1, stats.KeyN) + equals(t, 1, stats.Depth) + equals(t, 0, stats.BranchInuse) + equals(t, 0, stats.LeafInuse) if os.Getpagesize() == 4096 { // Incompatible page size - assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc") - assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc") + equals(t, 0, stats.BranchAlloc) + equals(t, 0, stats.LeafAlloc) } - assert.Equal(t, 1, stats.BucketN, "BucketN") - assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN") - assert.Equal(t, 16+16+6, stats.InlineBucketInuse, "InlineBucketInuse") + equals(t, 1, stats.BucketN) + equals(t, 1, stats.InlineBucketN) + equals(t, 16+16+6, stats.InlineBucketInuse) return nil }) } @@ -756,29 +755,29 @@ func TestBucket_Stats_EmptyBucket(t *testing.T) { db.Update(func(tx *bolt.Tx) error { // Add a bucket that fits on a single root leaf. 
_, err := tx.CreateBucket([]byte("whozawhats")) - assert.NoError(t, err) + ok(t, err) return nil }) db.MustCheck() db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("whozawhats")) stats := b.Stats() - assert.Equal(t, 0, stats.BranchPageN, "BranchPageN") - assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 0, stats.LeafPageN, "LeafPageN") - assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 0, stats.KeyN, "KeyN") - assert.Equal(t, 1, stats.Depth, "Depth") - assert.Equal(t, 0, stats.BranchInuse, "BranchInuse") - assert.Equal(t, 0, stats.LeafInuse, "LeafInuse") + equals(t, 0, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 0, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 0, stats.KeyN) + equals(t, 1, stats.Depth) + equals(t, 0, stats.BranchInuse) + equals(t, 0, stats.LeafInuse) if os.Getpagesize() == 4096 { // Incompatible page size - assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc") - assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc") + equals(t, 0, stats.BranchAlloc) + equals(t, 0, stats.LeafAlloc) } - assert.Equal(t, 1, stats.BucketN, "BucketN") - assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN") - assert.Equal(t, 16, stats.InlineBucketInuse, "InlineBucketInuse") + equals(t, 1, stats.BucketN) + equals(t, 1, stats.InlineBucketN) + equals(t, 16, stats.InlineBucketInuse) return nil }) } @@ -790,17 +789,17 @@ func TestBucket_Stats_Nested(t *testing.T) { db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("foo")) - assert.NoError(t, err) + ok(t, err) for i := 0; i < 100; i++ { b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))) } bar, err := b.CreateBucket([]byte("bar")) - assert.NoError(t, err) + ok(t, err) for i := 0; i < 10; i++ { bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) } baz, err := bar.CreateBucket([]byte("baz")) - assert.NoError(t, err) + ok(t, err) for i := 0; i < 10; i++ { baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) } @@ -812,13 +811,13 @@ func TestBucket_Stats_Nested(t *testing.T) { db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("foo")) stats := b.Stats() - assert.Equal(t, 0, stats.BranchPageN, "BranchPageN") - assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 2, stats.LeafPageN, "LeafPageN") - assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 122, stats.KeyN, "KeyN") - assert.Equal(t, 3, stats.Depth, "Depth") - assert.Equal(t, 0, stats.BranchInuse, "BranchInuse") + equals(t, 0, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 2, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 122, stats.KeyN) + equals(t, 3, stats.Depth) + equals(t, 0, stats.BranchInuse) foo := 16 // foo (pghdr) foo += 101 * 16 // foo leaf elements @@ -834,15 +833,15 @@ func TestBucket_Stats_Nested(t *testing.T) { baz += 10 * 16 // baz leaf elements baz += 10 + 10 // baz leaf key/values - assert.Equal(t, foo+bar+baz, stats.LeafInuse, "LeafInuse") + equals(t, foo+bar+baz, stats.LeafInuse) if os.Getpagesize() == 4096 { // Incompatible page size - assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc") - assert.Equal(t, 8192, stats.LeafAlloc, "LeafAlloc") + equals(t, 0, stats.BranchAlloc) + equals(t, 8192, stats.LeafAlloc) } - assert.Equal(t, 3, stats.BucketN, "BucketN") - assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN") - assert.Equal(t, baz, stats.InlineBucketInuse, "InlineBucketInuse") + equals(t, 3, stats.BucketN) + equals(t, 1, 
stats.InlineBucketN) + equals(t, baz, stats.InlineBucketInuse) return nil }) } @@ -873,22 +872,22 @@ func TestBucket_Stats_Large(t *testing.T) { db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) stats := b.Stats() - assert.Equal(t, 13, stats.BranchPageN, "BranchPageN") - assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN") - assert.Equal(t, 1196, stats.LeafPageN, "LeafPageN") - assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN") - assert.Equal(t, 100000, stats.KeyN, "KeyN") - assert.Equal(t, 3, stats.Depth, "Depth") - assert.Equal(t, 25257, stats.BranchInuse, "BranchInuse") - assert.Equal(t, 2596916, stats.LeafInuse, "LeafInuse") + equals(t, 13, stats.BranchPageN) + equals(t, 0, stats.BranchOverflowN) + equals(t, 1196, stats.LeafPageN) + equals(t, 0, stats.LeafOverflowN) + equals(t, 100000, stats.KeyN) + equals(t, 3, stats.Depth) + equals(t, 25257, stats.BranchInuse) + equals(t, 2596916, stats.LeafInuse) if os.Getpagesize() == 4096 { // Incompatible page size - assert.Equal(t, 53248, stats.BranchAlloc, "BranchAlloc") - assert.Equal(t, 4898816, stats.LeafAlloc, "LeafAlloc") + equals(t, 53248, stats.BranchAlloc) + equals(t, 4898816, stats.LeafAlloc) } - assert.Equal(t, 1, stats.BucketN, "BucketN") - assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN") - assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse") + equals(t, 1, stats.BucketN) + equals(t, 0, stats.InlineBucketN) + equals(t, 0, stats.InlineBucketInuse) return nil }) } @@ -960,20 +959,20 @@ func TestBucket_Put_Multiple(t *testing.T) { err := db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for _, item := range items { - assert.NoError(t, b.Put(item.Key, item.Value)) + ok(t, b.Put(item.Key, item.Value)) } return nil }) - assert.NoError(t, err) + ok(t, err) // Verify all items exist. db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for _, item := range items { value := b.Get(item.Key) - if !assert.Equal(t, item.Value, value) { + if !bytes.Equal(item.Value, value) { db.CopyTempFile() - t.FailNow() + t.Fatalf("exp=%x; got=%x", item.Value, value) } } return nil @@ -1002,18 +1001,18 @@ func TestBucket_Delete_Quick(t *testing.T) { err := db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for _, item := range items { - assert.NoError(t, b.Put(item.Key, item.Value)) + ok(t, b.Put(item.Key, item.Value)) } return nil }) - assert.NoError(t, err) + ok(t, err) // Remove items one at a time and check consistency. for _, item := range items { err := db.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("widgets")).Delete(item.Key) }) - assert.NoError(t, err) + ok(t, err) } // Anything before our deletion index should be nil. diff --git a/cmd/bolt/buckets_test.go b/cmd/bolt/buckets_test.go index 27ee619..d5050fd 100644 --- a/cmd/bolt/buckets_test.go +++ b/cmd/bolt/buckets_test.go @@ -5,7 +5,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a list of buckets can be retrieved. 
@@ -20,7 +19,7 @@ func TestBuckets(t *testing.T) { }) db.Close() output := run("buckets", path) - assert.Equal(t, "whatchits\nwidgets\nwoojits", output) + equals(t, "whatchits\nwidgets\nwoojits", output) }) } @@ -28,5 +27,5 @@ func TestBuckets(t *testing.T) { func TestBucketsDBNotFound(t *testing.T) { SetTestMode(true) output := run("buckets", "no/such/db") - assert.Equal(t, "stat no/such/db: no such file or directory", output) + equals(t, "stat no/such/db: no such file or directory", output) } diff --git a/cmd/bolt/export_test.go b/cmd/bolt/export_test.go index 13f57d1..d98403c 100644 --- a/cmd/bolt/export_test.go +++ b/cmd/bolt/export_test.go @@ -5,7 +5,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a database can be exported. @@ -32,7 +31,7 @@ func TestExport(t *testing.T) { }) db.Close() output := run("export", path) - assert.Equal(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output) + equals(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output) }) } @@ -40,5 +39,5 @@ func TestExport(t *testing.T) { func TestExport_NotFound(t *testing.T) { SetTestMode(true) output := run("export", "no/such/db") - assert.Equal(t, "stat no/such/db: no such file or directory", output) + equals(t, "stat no/such/db: no such file or directory", output) } diff --git a/cmd/bolt/get_test.go b/cmd/bolt/get_test.go index 7b7c3a0..8acd0f4 100644 --- a/cmd/bolt/get_test.go +++ b/cmd/bolt/get_test.go @@ -5,7 +5,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a value can be retrieved from the CLI. @@ -19,7 +18,7 @@ func TestGet(t *testing.T) { }) db.Close() output := run("get", path, "widgets", "foo") - assert.Equal(t, "bar", output) + equals(t, "bar", output) }) } @@ -27,7 +26,7 @@ func TestGet(t *testing.T) { func TestGetDBNotFound(t *testing.T) { SetTestMode(true) output := run("get", "no/such/db", "widgets", "foo") - assert.Equal(t, "stat no/such/db: no such file or directory", output) + equals(t, "stat no/such/db: no such file or directory", output) } // Ensure that an error is reported if the bucket is not found. @@ -36,7 +35,7 @@ func TestGetBucketNotFound(t *testing.T) { open(func(db *bolt.DB, path string) { db.Close() output := run("get", path, "widgets", "foo") - assert.Equal(t, "bucket not found: widgets", output) + equals(t, "bucket not found: widgets", output) }) } @@ -50,6 +49,6 @@ func TestGetKeyNotFound(t *testing.T) { }) db.Close() output := run("get", path, "widgets", "foo") - assert.Equal(t, "key not found: foo", output) + equals(t, "key not found: foo", output) }) } diff --git a/cmd/bolt/import_test.go b/cmd/bolt/import_test.go index 3d4f275..086bf03 100644 --- a/cmd/bolt/import_test.go +++ b/cmd/bolt/import_test.go @@ -6,7 +6,6 @@ import ( "github.com/boltdb/bolt" . 
"github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a database can be imported. @@ -15,32 +14,30 @@ func TestImport(t *testing.T) { // Write input file. input := tempfile() - assert.NoError(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600)) + ok(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600)) // Import database. path := tempfile() output := run("import", path, "--input", input) - assert.Equal(t, ``, output) + equals(t, ``, output) // Open database and verify contents. db, err := bolt.Open(path, 0600, nil) - assert.NoError(t, err) + ok(t, err) db.View(func(tx *bolt.Tx) error { - assert.NotNil(t, tx.Bucket([]byte("empty"))) + assert(t, tx.Bucket([]byte("empty")) != nil, "") b := tx.Bucket([]byte("widgets")) - if assert.NotNil(t, b) { - assert.Equal(t, []byte("0000"), b.Get([]byte("foo"))) - assert.Equal(t, []byte(""), b.Get([]byte("bar"))) - } + assert(t, b != nil, "") + equals(t, []byte("0000"), b.Get([]byte("foo"))) + equals(t, []byte(""), b.Get([]byte("bar"))) b = tx.Bucket([]byte("woojits")) - if assert.NotNil(t, b) { - assert.Equal(t, []byte("XXXX"), b.Get([]byte("baz"))) + assert(t, b != nil, "") + equals(t, []byte("XXXX"), b.Get([]byte("baz"))) - b = b.Bucket([]byte("woojits/subbucket")) - assert.Equal(t, []byte("A"), b.Get([]byte("bat"))) - } + b = b.Bucket([]byte("woojits/subbucket")) + equals(t, []byte("A"), b.Get([]byte("bat"))) return nil }) @@ -51,5 +48,5 @@ func TestImport(t *testing.T) { func TestImport_NotFound(t *testing.T) { SetTestMode(true) output := run("import", "path/to/db", "--input", "no/such/file") - assert.Equal(t, "open no/such/file: no such file or directory", output) + equals(t, "open no/such/file: no such file or directory", output) } diff --git a/cmd/bolt/info_test.go b/cmd/bolt/info_test.go index 668cc61..dab74f6 100644 --- a/cmd/bolt/info_test.go +++ b/cmd/bolt/info_test.go @@ -5,7 +5,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a database info can be printed. @@ -20,7 +19,7 @@ func TestInfo(t *testing.T) { }) db.Close() output := run("info", path) - assert.Equal(t, `Page Size: 4096`, output) + equals(t, `Page Size: 4096`, output) }) } @@ -28,5 +27,5 @@ func TestInfo(t *testing.T) { func TestInfo_NotFound(t *testing.T) { SetTestMode(true) output := run("info", "no/such/db") - assert.Equal(t, "stat no/such/db: no such file or directory", output) + equals(t, "stat no/such/db: no such file or directory", output) } diff --git a/cmd/bolt/keys_test.go b/cmd/bolt/keys_test.go index 2b5a9a0..0cc4e0c 100644 --- a/cmd/bolt/keys_test.go +++ b/cmd/bolt/keys_test.go @@ -5,7 +5,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a list of keys can be retrieved for a given bucket. 
@@ -21,7 +20,7 @@ func TestKeys(t *testing.T) { }) db.Close() output := run("keys", path, "widgets") - assert.Equal(t, "0001\n0002\n0003", output) + equals(t, "0001\n0002\n0003", output) }) } @@ -29,7 +28,7 @@ func TestKeys(t *testing.T) { func TestKeysDBNotFound(t *testing.T) { SetTestMode(true) output := run("keys", "no/such/db", "widgets") - assert.Equal(t, "stat no/such/db: no such file or directory", output) + equals(t, "stat no/such/db: no such file or directory", output) } // Ensure that an error is reported if the bucket is not found. @@ -38,6 +37,6 @@ func TestKeysBucketNotFound(t *testing.T) { open(func(db *bolt.DB, path string) { db.Close() output := run("keys", path, "widgets") - assert.Equal(t, "bucket not found: widgets", output) + equals(t, "bucket not found: widgets", output) }) } diff --git a/cmd/bolt/main_test.go b/cmd/bolt/main_test.go index 0614d43..4448d6e 100644 --- a/cmd/bolt/main_test.go +++ b/cmd/bolt/main_test.go @@ -1,9 +1,14 @@ package main_test import ( + "fmt" "io/ioutil" "os" + "path/filepath" + "reflect" + "runtime" "strings" + "testing" "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" @@ -35,3 +40,30 @@ func tempfile() string { os.Remove(f.Name()) return f.Name() } + +// assert fails the test if the condition is false. +func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { + if !condition { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) + tb.FailNow() + } +} + +// ok fails the test if an err is not nil. +func ok(tb testing.TB, err error) { + if err != nil { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) + tb.FailNow() + } +} + +// equals fails the test if exp is not equal to act. +func equals(tb testing.TB, exp, act interface{}) { + if !reflect.DeepEqual(exp, act) { + _, file, line, _ := runtime.Caller(1) + fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + tb.FailNow() + } +} diff --git a/cmd/bolt/stats_test.go b/cmd/bolt/stats_test.go index 2ad5d51..44ed434 100644 --- a/cmd/bolt/stats_test.go +++ b/cmd/bolt/stats_test.go @@ -7,7 +7,6 @@ import ( "github.com/boltdb/bolt" . "github.com/boltdb/bolt/cmd/bolt" - "github.com/stretchr/testify/assert" ) func TestStats(t *testing.T) { @@ -40,7 +39,7 @@ func TestStats(t *testing.T) { }) db.Close() output := run("stats", path, "b") - assert.Equal(t, "Aggregate statistics for 2 buckets\n\n"+ + equals(t, "Aggregate statistics for 2 buckets\n\n"+ "Page count statistics\n"+ "\tNumber of logical branch pages: 0\n"+ "\tNumber of physical branch overflow pages: 0\n"+ diff --git a/cursor_test.go b/cursor_test.go index edca0f0..6957a29 100644 --- a/cursor_test.go +++ b/cursor_test.go @@ -8,7 +8,6 @@ import ( "testing/quick" "github.com/boltdb/bolt" - "github.com/stretchr/testify/assert" ) // Ensure that a cursor can return a reference to the bucket that created it. 
@@ -18,7 +17,7 @@ func TestCursor_Bucket(t *testing.T) { db.Update(func(tx *bolt.Tx) error { b, _ := tx.CreateBucket([]byte("widgets")) c := b.Cursor() - assert.Equal(t, b, c.Bucket()) + equals(t, b, c.Bucket()) return nil }) } @@ -29,12 +28,12 @@ func TestCursor_Seek(t *testing.T) { defer db.Close() db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) - assert.NoError(t, b.Put([]byte("foo"), []byte("0001"))) - assert.NoError(t, b.Put([]byte("bar"), []byte("0002"))) - assert.NoError(t, b.Put([]byte("baz"), []byte("0003"))) + ok(t, err) + ok(t, b.Put([]byte("foo"), []byte("0001"))) + ok(t, b.Put([]byte("bar"), []byte("0002"))) + ok(t, b.Put([]byte("baz"), []byte("0003"))) _, err = b.CreateBucket([]byte("bkt")) - assert.NoError(t, err) + ok(t, err) return nil }) db.View(func(tx *bolt.Tx) error { @@ -42,28 +41,28 @@ func TestCursor_Seek(t *testing.T) { // Exact match should go to the key. k, v := c.Seek([]byte("bar")) - assert.Equal(t, []byte("bar"), k) - assert.Equal(t, []byte("0002"), v) + equals(t, []byte("bar"), k) + equals(t, []byte("0002"), v) // Inexact match should go to the next key. k, v = c.Seek([]byte("bas")) - assert.Equal(t, []byte("baz"), k) - assert.Equal(t, []byte("0003"), v) + equals(t, []byte("baz"), k) + equals(t, []byte("0003"), v) // Low key should go to the first key. k, v = c.Seek([]byte("")) - assert.Equal(t, []byte("bar"), k) - assert.Equal(t, []byte("0002"), v) + equals(t, []byte("bar"), k) + equals(t, []byte("0002"), v) // High key should return no key. k, v = c.Seek([]byte("zzz")) - assert.Nil(t, k) - assert.Nil(t, v) + assert(t, k == nil, "") + assert(t, v == nil, "") // Buckets should return their key but no value. k, v = c.Seek([]byte("bkt")) - assert.Equal(t, []byte("bkt"), k) - assert.Nil(t, v) + equals(t, []byte("bkt"), k) + assert(t, v == nil, "") return nil }) @@ -98,13 +97,13 @@ func TestCursor_Delete(t *testing.T) { } c.Seek([]byte("sub")) err := c.Delete() - assert.Equal(t, err, bolt.ErrIncompatibleValue) + equals(t, err, bolt.ErrIncompatibleValue) return nil }) db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - assert.Equal(t, b.Stats().KeyN, count/2+1) + equals(t, b.Stats().KeyN, count/2+1) return nil }) } @@ -144,16 +143,16 @@ func TestCursor_Seek_Large(t *testing.T) { // The last seek is beyond the end of the the range so // it should return nil. if i == count-1 { - assert.Nil(t, k) + assert(t, k == nil, "") continue } // Otherwise we should seek to the exact key or the next key. 
num := binary.BigEndian.Uint64(k) if i%2 == 0 { - assert.Equal(t, uint64(i), num) + equals(t, uint64(i), num) } else { - assert.Equal(t, uint64(i+1), num) + equals(t, uint64(i+1), num) } } @@ -172,8 +171,8 @@ func TestCursor_EmptyBucket(t *testing.T) { db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() k, v := c.First() - assert.Nil(t, k) - assert.Nil(t, v) + assert(t, k == nil, "") + assert(t, v == nil, "") return nil }) } @@ -190,8 +189,8 @@ func TestCursor_EmptyBucketReverse(t *testing.T) { db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() k, v := c.Last() - assert.Nil(t, k) - assert.Nil(t, v) + assert(t, k == nil, "") + assert(t, v == nil, "") return nil }) } @@ -212,24 +211,24 @@ func TestCursor_Iterate_Leaf(t *testing.T) { c := tx.Bucket([]byte("widgets")).Cursor() k, v := c.First() - assert.Equal(t, string(k), "bar") - assert.Equal(t, v, []byte{1}) + equals(t, string(k), "bar") + equals(t, v, []byte{1}) k, v = c.Next() - assert.Equal(t, string(k), "baz") - assert.Equal(t, v, []byte{}) + equals(t, string(k), "baz") + equals(t, v, []byte{}) k, v = c.Next() - assert.Equal(t, string(k), "foo") - assert.Equal(t, v, []byte{0}) + equals(t, string(k), "foo") + equals(t, v, []byte{0}) k, v = c.Next() - assert.Nil(t, k) - assert.Nil(t, v) + assert(t, k == nil, "") + assert(t, v == nil, "") k, v = c.Next() - assert.Nil(t, k) - assert.Nil(t, v) + assert(t, k == nil, "") + assert(t, v == nil, "") tx.Rollback() } @@ -250,24 +249,24 @@ func TestCursor_LeafRootReverse(t *testing.T) { c := tx.Bucket([]byte("widgets")).Cursor() k, v := c.Last() - assert.Equal(t, string(k), "foo") - assert.Equal(t, v, []byte{0}) + equals(t, string(k), "foo") + equals(t, v, []byte{0}) k, v = c.Prev() - assert.Equal(t, string(k), "baz") - assert.Equal(t, v, []byte{}) + equals(t, string(k), "baz") + equals(t, v, []byte{}) k, v = c.Prev() - assert.Equal(t, string(k), "bar") - assert.Equal(t, v, []byte{1}) + equals(t, string(k), "bar") + equals(t, v, []byte{1}) k, v = c.Prev() - assert.Nil(t, k) - assert.Nil(t, v) + assert(t, k == nil, "") + assert(t, v == nil, "") k, v = c.Prev() - assert.Nil(t, k) - assert.Nil(t, v) + assert(t, k == nil, "") + assert(t, v == nil, "") tx.Rollback() } @@ -288,16 +287,16 @@ func TestCursor_Restart(t *testing.T) { c := tx.Bucket([]byte("widgets")).Cursor() k, _ := c.First() - assert.Equal(t, string(k), "bar") + equals(t, string(k), "bar") k, _ = c.Next() - assert.Equal(t, string(k), "foo") + equals(t, string(k), "foo") k, _ = c.First() - assert.Equal(t, string(k), "bar") + equals(t, string(k), "bar") k, _ = c.Next() - assert.Equal(t, string(k), "foo") + equals(t, string(k), "foo") tx.Rollback() } @@ -313,9 +312,9 @@ func TestCursor_QuickCheck(t *testing.T) { tx.CreateBucket([]byte("widgets")) b := tx.Bucket([]byte("widgets")) for _, item := range items { - assert.NoError(t, b.Put(item.Key, item.Value)) + ok(t, b.Put(item.Key, item.Value)) } - assert.NoError(t, tx.Commit()) + ok(t, tx.Commit()) // Sort test data. 
sort.Sort(items) @@ -325,11 +324,11 @@ func TestCursor_QuickCheck(t *testing.T) { tx, _ = db.Begin(false) c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { - assert.Equal(t, k, items[index].Key) - assert.Equal(t, v, items[index].Value) + equals(t, k, items[index].Key) + equals(t, v, items[index].Value) index++ } - assert.Equal(t, len(items), index) + equals(t, len(items), index) tx.Rollback() return true @@ -350,9 +349,9 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) { tx.CreateBucket([]byte("widgets")) b := tx.Bucket([]byte("widgets")) for _, item := range items { - assert.NoError(t, b.Put(item.Key, item.Value)) + ok(t, b.Put(item.Key, item.Value)) } - assert.NoError(t, tx.Commit()) + ok(t, tx.Commit()) // Sort test data. sort.Sort(revtestdata(items)) @@ -362,11 +361,11 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) { tx, _ = db.Begin(false) c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { - assert.Equal(t, k, items[index].Key) - assert.Equal(t, v, items[index].Value) + equals(t, k, items[index].Key) + equals(t, v, items[index].Value) index++ } - assert.Equal(t, len(items), index) + equals(t, len(items), index) tx.Rollback() return true @@ -383,13 +382,13 @@ func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) + ok(t, err) _, err = b.CreateBucket([]byte("foo")) - assert.NoError(t, err) + ok(t, err) _, err = b.CreateBucket([]byte("bar")) - assert.NoError(t, err) + ok(t, err) _, err = b.CreateBucket([]byte("baz")) - assert.NoError(t, err) + ok(t, err) return nil }) db.View(func(tx *bolt.Tx) error { @@ -397,9 +396,9 @@ func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { names = append(names, string(k)) - assert.Nil(t, v) + assert(t, v == nil, "") } - assert.Equal(t, names, []string{"bar", "baz", "foo"}) + equals(t, names, []string{"bar", "baz", "foo"}) return nil }) } @@ -411,13 +410,13 @@ func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - assert.NoError(t, err) + ok(t, err) _, err = b.CreateBucket([]byte("foo")) - assert.NoError(t, err) + ok(t, err) _, err = b.CreateBucket([]byte("bar")) - assert.NoError(t, err) + ok(t, err) _, err = b.CreateBucket([]byte("baz")) - assert.NoError(t, err) + ok(t, err) return nil }) db.View(func(tx *bolt.Tx) error { @@ -425,9 +424,9 @@ func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.Last(); k != nil; k, v = c.Prev() { names = append(names, string(k)) - assert.Nil(t, v) + assert(t, v == nil, "") } - assert.Equal(t, names, []string{"foo", "baz", "bar"}) + equals(t, names, []string{"foo", "baz", "bar"}) return nil }) } diff --git a/db_test.go b/db_test.go index 22eaea7..e9da776 100644 --- a/db_test.go +++ b/db_test.go @@ -14,7 +14,6 @@ import ( "time" "github.com/boltdb/bolt" - "github.com/stretchr/testify/assert" ) var statsFlag = flag.Bool("stats", false, "show performance stats") @@ -22,8 +21,8 @@ var statsFlag = flag.Bool("stats", false, "show performance stats") // Ensure that opening a database with a bad path returns an error. 
func TestOpen_BadPath(t *testing.T) { db, err := bolt.Open("", 0666, nil) - assert.Error(t, err) - assert.Nil(t, db) + assert(t, err != nil, "err: %s", err) + assert(t, db == nil, "") } // Ensure that a database can be opened without error. @@ -31,10 +30,10 @@ func TestOpen(t *testing.T) { path := tempfile() defer os.Remove(path) db, err := bolt.Open(path, 0666, nil) - assert.NotNil(t, db) - assert.NoError(t, err) - assert.Equal(t, db.Path(), path) - assert.NoError(t, db.Close()) + assert(t, db != nil, "") + ok(t, err) + equals(t, db.Path(), path) + ok(t, db.Close()) } // Ensure that opening an already open database file will timeout. @@ -48,15 +47,15 @@ func TestOpen_Timeout(t *testing.T) { // Open a data file. db0, err := bolt.Open(path, 0666, nil) - assert.NotNil(t, db0) - assert.NoError(t, err) + assert(t, db0 != nil, "") + ok(t, err) // Attempt to open the database again. start := time.Now() db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond}) - assert.Nil(t, db1) - assert.Equal(t, bolt.ErrTimeout, err) - assert.True(t, time.Since(start) > 100*time.Millisecond) + assert(t, db1 == nil, "") + equals(t, bolt.ErrTimeout, err) + assert(t, time.Since(start) > 100*time.Millisecond, "") db0.Close() } @@ -72,8 +71,8 @@ func TestOpen_Wait(t *testing.T) { // Open a data file. db0, err := bolt.Open(path, 0666, nil) - assert.NotNil(t, db0) - assert.NoError(t, err) + assert(t, db0 != nil, "") + ok(t, err) // Close it in just a bit. time.AfterFunc(100*time.Millisecond, func() { db0.Close() }) @@ -81,9 +80,9 @@ func TestOpen_Wait(t *testing.T) { // Attempt to open the database again. start := time.Now() db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond}) - assert.NotNil(t, db1) - assert.NoError(t, err) - assert.True(t, time.Since(start) > 100*time.Millisecond) + assert(t, db1 != nil, "") + ok(t, err) + assert(t, time.Since(start) > 100*time.Millisecond, "") } // Ensure that a re-opened database is consistent. @@ -92,13 +91,13 @@ func TestOpen_Check(t *testing.T) { defer os.Remove(path) db, err := bolt.Open(path, 0666, nil) - assert.NoError(t, err) - assert.NoError(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) + ok(t, err) + ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) db.Close() db, err = bolt.Open(path, 0666, nil) - assert.NoError(t, err) - assert.NoError(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) + ok(t, err) + ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) db.Close() } @@ -108,10 +107,9 @@ func TestDB_Open_FileError(t *testing.T) { defer os.Remove(path) _, err := bolt.Open(path+"/youre-not-my-real-parent", 0666, nil) - if err, _ := err.(*os.PathError); assert.Error(t, err) { - assert.Equal(t, path+"/youre-not-my-real-parent", err.Path) - assert.Equal(t, "open", err.Op) - } + assert(t, err.(*os.PathError) != nil, "") + equals(t, path+"/youre-not-my-real-parent", err.(*os.PathError).Path) + equals(t, "open", err.(*os.PathError).Op) } // Ensure that write errors to the meta file handler during initialization are returned. 
@@ -125,87 +123,24 @@ func TestDB_Open_FileTooSmall(t *testing.T) { defer os.Remove(path) db, err := bolt.Open(path, 0666, nil) - assert.NoError(t, err) + ok(t, err) db.Close() // corrupt the database - assert.NoError(t, os.Truncate(path, int64(os.Getpagesize()))) + ok(t, os.Truncate(path, int64(os.Getpagesize()))) db, err = bolt.Open(path, 0666, nil) - assert.Equal(t, errors.New("file size too small"), err) + equals(t, errors.New("file size too small"), err) } // TODO(benbjohnson): Test corruption at every byte of the first two pages. -/* -// Ensure that corrupt meta0 page errors get returned. -func TestDB_Open_CorruptMeta0(t *testing.T) { - var m meta - m.magic = magic - m.version = version - m.pageSize = 0x8000 - - path := tempfile() - defer os.Remove(path) - - // Create a file with bad magic. - b := make([]byte, 0x10000) - p0, p1 := (*page)(unsafe.Pointer(&b[0x0000])), (*page)(unsafe.Pointer(&b[0x8000])) - p0.meta().magic = 0 - p0.meta().version = version - p1.meta().magic = magic - p1.meta().version = version - err := ioutil.WriteFile(path, b, 0666) - assert.NoError(t, err) - - // Open the database. - _, err = bolt.Open(path, 0666, nil) - assert.Equal(t, err, errors.New("meta0 error: invalid database")) -} - -// Ensure that a corrupt meta page checksum causes the open to fail. -func TestDB_Open_MetaChecksumError(t *testing.T) { - for i := 0; i < 2; i++ { - path := tempfile() - defer os.Remove(path) - - db, err := bolt.Open(path, 0600, nil) - pageSize := db.pageSize - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("woojits")) - return err - }) - db.Close() - - // Change a single byte in the meta page. - f, _ := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600) - f.WriteAt([]byte{1}, int64((i*pageSize)+(pageHeaderSize+12))) - f.Sync() - f.Close() - - // Reopen the database. - _, err = bolt.Open(path, 0600, nil) - if assert.Error(t, err) { - if i == 0 { - assert.Equal(t, "meta0 error: checksum error", err.Error()) - } else { - assert.Equal(t, "meta1 error: checksum error", err.Error()) - } - } - } -} -*/ - // Ensure that a database cannot open a transaction when it's not open. func TestDB_Begin_DatabaseNotOpen(t *testing.T) { var db bolt.DB tx, err := db.Begin(false) - assert.Nil(t, tx) - assert.Equal(t, err, bolt.ErrDatabaseNotOpen) + assert(t, tx == nil, "") + equals(t, err, bolt.ErrDatabaseNotOpen) } // Ensure that a read-write transaction can be retrieved. @@ -213,19 +148,19 @@ func TestDB_BeginRW(t *testing.T) { db := NewTestDB() defer db.Close() tx, err := db.Begin(true) - assert.NotNil(t, tx) - assert.NoError(t, err) - assert.Equal(t, tx.DB(), db) - assert.Equal(t, tx.Writable(), true) - assert.NoError(t, tx.Commit()) + assert(t, tx != nil, "") + ok(t, err) + assert(t, tx.DB() == db.DB, "") + equals(t, tx.Writable(), true) + ok(t, tx.Commit()) } // Ensure that opening a transaction while the DB is closed returns an error. func TestDB_BeginRW_Closed(t *testing.T) { var db bolt.DB tx, err := db.Begin(true) - assert.Equal(t, err, bolt.ErrDatabaseNotOpen) - assert.Nil(t, tx) + equals(t, err, bolt.ErrDatabaseNotOpen) + assert(t, tx == nil, "") } // Ensure a database can provide a transactional block. 
@@ -240,13 +175,13 @@ func TestDB_Update(t *testing.T) {
 		b.Delete([]byte("foo"))
 		return nil
 	})
-	assert.NoError(t, err)
+	ok(t, err)
 	err = db.View(func(tx *bolt.Tx) error {
-		assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
-		assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
+		assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
+		equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
 		return nil
 	})
-	assert.NoError(t, err)
+	ok(t, err)
 }
 
 // Ensure a closed database returns an error while running a transaction block
@@ -256,23 +191,87 @@ func TestDB_Update_Closed(t *testing.T) {
 		tx.CreateBucket([]byte("widgets"))
 		return nil
 	})
-	assert.Equal(t, err, bolt.ErrDatabaseNotOpen)
+	equals(t, err, bolt.ErrDatabaseNotOpen)
 }
 
 // Ensure a panic occurs while trying to commit a managed transaction.
-func TestDB_Update_ManualCommitAndRollback(t *testing.T) {
-	var db bolt.DB
+func TestDB_Update_ManualCommit(t *testing.T) {
+	db := NewTestDB()
+	defer db.Close()
+
+	var ok bool
 	db.Update(func(tx *bolt.Tx) error {
-		tx.CreateBucket([]byte("widgets"))
-		assert.Panics(t, func() { tx.Commit() })
-		assert.Panics(t, func() { tx.Rollback() })
+		func() {
+			defer func() {
+				if r := recover(); r != nil {
+					ok = true
+				}
+			}()
+			tx.Commit()
+		}()
 		return nil
 	})
-	db.View(func(tx *bolt.Tx) error {
-		assert.Panics(t, func() { tx.Commit() })
-		assert.Panics(t, func() { tx.Rollback() })
+	assert(t, ok, "expected panic")
+}
+
+// Ensure a panic occurs while trying to rollback a managed transaction.
+func TestDB_Update_ManualRollback(t *testing.T) {
+	db := NewTestDB()
+	defer db.Close()
+
+	var ok bool
+	db.Update(func(tx *bolt.Tx) error {
+		func() {
+			defer func() {
+				if r := recover(); r != nil {
+					ok = true
+				}
+			}()
+			tx.Rollback()
+		}()
 		return nil
 	})
+	assert(t, ok, "expected panic")
+}
+
+// Ensure a panic occurs while trying to commit a managed read-only transaction.
+func TestDB_View_ManualCommit(t *testing.T) {
+	db := NewTestDB()
+	defer db.Close()
+
+	var ok bool
+	db.View(func(tx *bolt.Tx) error {
+		func() {
+			defer func() {
+				if r := recover(); r != nil {
+					ok = true
+				}
+			}()
+			tx.Commit()
+		}()
+		return nil
+	})
+	assert(t, ok, "expected panic")
+}
+
+// Ensure a panic occurs while trying to rollback a managed read-only transaction.
+func TestDB_View_ManualRollback(t *testing.T) {
+	db := NewTestDB()
+	defer db.Close()
+
+	var ok bool
+	db.View(func(tx *bolt.Tx) error {
+		func() {
+			defer func() {
+				if r := recover(); r != nil {
+					ok = true
+				}
+			}()
+			tx.Rollback()
+		}()
+		return nil
+	})
+	assert(t, ok, "expected panic")
 }
 
 // Ensure a write transaction that panics does not hold open locks.
@@ -297,11 +296,11 @@ func TestDB_Update_Panic(t *testing.T) {
 		_, err := tx.CreateBucket([]byte("widgets"))
 		return err
 	})
-	assert.NoError(t, err)
+	ok(t, err)
 
 	// Verify that our change persisted.
 	err = db.Update(func(tx *bolt.Tx) error {
-		assert.NotNil(t, tx.Bucket([]byte("widgets")))
+		assert(t, tx.Bucket([]byte("widgets")) != nil, "")
 		return nil
 	})
 }
@@ -313,7 +312,7 @@ func TestDB_View_Error(t *testing.T) {
 	err := db.View(func(tx *bolt.Tx) error {
 		return errors.New("xxx")
 	})
-	assert.Equal(t, errors.New("xxx"), err)
+	equals(t, errors.New("xxx"), err)
 }
 
 // Ensure a read transaction that panics does not hold open locks.
@@ -332,14 +331,14 @@ func TestDB_View_Panic(t *testing.T) {
 			}
 		}()
 		db.View(func(tx *bolt.Tx) error {
-			assert.NotNil(t, tx.Bucket([]byte("widgets")))
+			assert(t, tx.Bucket([]byte("widgets")) != nil, "")
 			panic("omg")
 		})
 	}()
 
 	// Verify that we can still use read transactions.
 	db.View(func(tx *bolt.Tx) error {
-		assert.NotNil(t, tx.Bucket([]byte("widgets")))
+		assert(t, tx.Bucket([]byte("widgets")) != nil, "")
 		return nil
 	})
 }
@@ -358,9 +357,9 @@ func TestDB_Stats(t *testing.T) {
 		return err
 	})
 	stats := db.Stats()
-	assert.Equal(t, 2, stats.TxStats.PageCount, "PageCount")
-	assert.Equal(t, 0, stats.FreePageN, "FreePageN")
-	assert.Equal(t, 2, stats.PendingPageN, "PendingPageN")
+	equals(t, 2, stats.TxStats.PageCount)
+	equals(t, 0, stats.FreePageN)
+	equals(t, 2, stats.PendingPageN)
 }
 
 // Ensure that database pages are in expected order and type.
@@ -374,31 +373,37 @@ func TestDB_Consistency(t *testing.T) {
 
 	for i := 0; i < 10; i++ {
 		db.Update(func(tx *bolt.Tx) error {
-			assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+			ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
 			return nil
 		})
 	}
 	db.Update(func(tx *bolt.Tx) error {
-		if p, _ := tx.Page(0); assert.NotNil(t, p) {
-			assert.Equal(t, "meta", p.Type)
-		}
-		if p, _ := tx.Page(1); assert.NotNil(t, p) {
-			assert.Equal(t, "meta", p.Type)
-		}
-		if p, _ := tx.Page(2); assert.NotNil(t, p) {
-			assert.Equal(t, "free", p.Type)
-		}
-		if p, _ := tx.Page(3); assert.NotNil(t, p) {
-			assert.Equal(t, "free", p.Type)
-		}
-		if p, _ := tx.Page(4); assert.NotNil(t, p) {
-			assert.Equal(t, "leaf", p.Type) // root leaf
-		}
-		if p, _ := tx.Page(5); assert.NotNil(t, p) {
-			assert.Equal(t, "freelist", p.Type)
-		}
-		p, _ := tx.Page(6)
-		assert.Nil(t, p)
+		p, _ := tx.Page(0)
+		assert(t, p != nil, "")
+		equals(t, "meta", p.Type)
+
+		p, _ = tx.Page(1)
+		assert(t, p != nil, "")
+		equals(t, "meta", p.Type)
+
+		p, _ = tx.Page(2)
+		assert(t, p != nil, "")
+		equals(t, "free", p.Type)
+
+		p, _ = tx.Page(3)
+		assert(t, p != nil, "")
+		equals(t, "free", p.Type)
+
+		p, _ = tx.Page(4)
+		assert(t, p != nil, "")
+		equals(t, "leaf", p.Type)
+
+		p, _ = tx.Page(5)
+		assert(t, p != nil, "")
+		equals(t, "freelist", p.Type)
+
+		p, _ = tx.Page(6)
+		assert(t, p == nil, "")
 		return nil
 	})
 }
@@ -411,9 +416,9 @@ func TestDBStats_Sub(t *testing.T) {
 	b.TxStats.PageCount = 10
 	b.FreePageN = 14
 	diff := b.Sub(&a)
-	assert.Equal(t, 7, diff.TxStats.PageCount)
+	equals(t, 7, diff.TxStats.PageCount)
 	// free page stats are copied from the receiver and not subtracted
-	assert.Equal(t, 14, diff.FreePageN)
+	equals(t, 14, diff.FreePageN)
 }
 
 func ExampleDB_Update() {
diff --git a/freelist_test.go b/freelist_test.go
index 24ce0f6..792ca92 100644
--- a/freelist_test.go
+++ b/freelist_test.go
@@ -1,24 +1,27 @@
 package bolt
 
 import (
+	"reflect"
 	"testing"
 	"unsafe"
-
-	"github.com/stretchr/testify/assert"
 )
 
 // Ensure that a page is added to a transaction's freelist.
 func TestFreelist_free(t *testing.T) {
 	f := newFreelist()
 	f.free(100, &page{id: 12})
-	assert.Equal(t, f.pending[100], []pgid{12})
+	if !reflect.DeepEqual([]pgid{12}, f.pending[100]) {
+		t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100])
+	}
 }
 
 // Ensure that a page and its overflow is added to a transaction's freelist.
 func TestFreelist_free_overflow(t *testing.T) {
 	f := newFreelist()
 	f.free(100, &page{id: 12, overflow: 3})
-	assert.Equal(t, f.pending[100], []pgid{12, 13, 14, 15})
+	if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) {
+		t.Fatalf("exp=%v; got=%v", exp, f.pending[100])
+	}
 }
 
 // Ensure that a transaction's free pages can be released.
@@ -29,25 +32,56 @@ func TestFreelist_release(t *testing.T) {
 	f.free(102, &page{id: 39})
 	f.release(100)
 	f.release(101)
-	assert.Equal(t, []pgid{9, 12, 13}, f.ids)
+	if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {
+		t.Fatalf("exp=%v; got=%v", exp, f.ids)
+	}
+
 	f.release(102)
-	assert.Equal(t, []pgid{9, 12, 13, 39}, f.ids)
+	if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {
+		t.Fatalf("exp=%v; got=%v", exp, f.ids)
+	}
 }
 
 // Ensure that a freelist can find contiguous blocks of pages.
 func TestFreelist_allocate(t *testing.T) {
 	f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
-	assert.Equal(t, 3, int(f.allocate(3)))
-	assert.Equal(t, 6, int(f.allocate(1)))
-	assert.Equal(t, 0, int(f.allocate(3)))
-	assert.Equal(t, 12, int(f.allocate(2)))
-	assert.Equal(t, 7, int(f.allocate(1)))
-	assert.Equal(t, 0, int(f.allocate(0)))
-	assert.Equal(t, []pgid{9, 18}, f.ids)
-	assert.Equal(t, 9, int(f.allocate(1)))
-	assert.Equal(t, 18, int(f.allocate(1)))
-	assert.Equal(t, 0, int(f.allocate(1)))
-	assert.Equal(t, []pgid{}, f.ids)
+	if id := int(f.allocate(3)); id != 3 {
+		t.Fatalf("exp=3; got=%v", id)
+	}
+	if id := int(f.allocate(1)); id != 6 {
+		t.Fatalf("exp=6; got=%v", id)
+	}
+	if id := int(f.allocate(3)); id != 0 {
+		t.Fatalf("exp=0; got=%v", id)
+	}
+	if id := int(f.allocate(2)); id != 12 {
+		t.Fatalf("exp=12; got=%v", id)
+	}
+	if id := int(f.allocate(1)); id != 7 {
+		t.Fatalf("exp=7; got=%v", id)
+	}
+	if id := int(f.allocate(0)); id != 0 {
+		t.Fatalf("exp=0; got=%v", id)
+	}
+	if id := int(f.allocate(0)); id != 0 {
+		t.Fatalf("exp=0; got=%v", id)
+	}
+	if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) {
+		t.Fatalf("exp=%v; got=%v", exp, f.ids)
+	}
+
+	if id := int(f.allocate(1)); id != 9 {
+		t.Fatalf("exp=9; got=%v", id)
+	}
+	if id := int(f.allocate(1)); id != 18 {
+		t.Fatalf("exp=18; got=%v", id)
+	}
+	if id := int(f.allocate(1)); id != 0 {
+		t.Fatalf("exp=0; got=%v", id)
+	}
+	if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) {
+		t.Fatalf("exp=%v; got=%v", exp, f.ids)
+	}
 }
 
 // Ensure that a freelist can deserialize from a freelist page.
@@ -68,9 +102,9 @@ func TestFreelist_read(t *testing.T) {
 	f.read(page)
 
 	// Ensure that there are two page ids in the freelist.
-	assert.Equal(t, len(f.ids), 2)
-	assert.Equal(t, f.ids[0], pgid(23))
-	assert.Equal(t, f.ids[1], pgid(50))
+	if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) {
+		t.Fatalf("exp=%v; got=%v", exp, f.ids)
+	}
 }
 
 // Ensure that a freelist can serialize into a freelist page.
@@ -89,10 +123,7 @@ func TestFreelist_write(t *testing.T) {
 
 	// Ensure that the freelist is correct.
 	// All pages should be present and in reverse order.
-	assert.Equal(t, len(f2.ids), 5)
-	assert.Equal(t, f2.ids[0], pgid(3))
-	assert.Equal(t, f2.ids[1], pgid(11))
-	assert.Equal(t, f2.ids[2], pgid(12))
-	assert.Equal(t, f2.ids[3], pgid(28))
-	assert.Equal(t, f2.ids[4], pgid(39))
+	if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
+		t.Fatalf("exp=%v; got=%v", exp, f2.ids)
+	}
 }
diff --git a/node_test.go b/node_test.go
index b85e18f..f4bf4af 100644
--- a/node_test.go
+++ b/node_test.go
@@ -3,8 +3,6 @@ package bolt
 import (
 	"testing"
 	"unsafe"
-
-	"github.com/stretchr/testify/assert"
 )
 
 // Ensure that a node can insert a key/value.
@@ -14,14 +12,22 @@ func TestNode_put(t *testing.T) {
 	n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
 	n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
 	n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)
-	assert.Equal(t, len(n.inodes), 3)
-	assert.Equal(t, n.inodes[0].key, []byte("bar"))
-	assert.Equal(t, n.inodes[0].value, []byte("1"))
-	assert.Equal(t, n.inodes[1].key, []byte("baz"))
-	assert.Equal(t, n.inodes[1].value, []byte("2"))
-	assert.Equal(t, n.inodes[2].key, []byte("foo"))
-	assert.Equal(t, n.inodes[2].value, []byte("3"))
-	assert.Equal(t, n.inodes[2].flags, uint32(leafPageFlag))
+
+	if len(n.inodes) != 3 {
+		t.Fatalf("exp=3; got=%d", len(n.inodes))
+	}
+	if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" {
+		t.Fatalf("exp=<bar,1>; got=<%s,%s>", k, v)
+	}
+	if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" {
+		t.Fatalf("exp=<baz,2>; got=<%s,%s>", k, v)
+	}
+	if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" {
+		t.Fatalf("exp=<foo,3>; got=<%s,%s>", k, v)
+	}
+	if n.inodes[2].flags != uint32(leafPageFlag) {
+		t.Fatalf("not a leaf: %d", n.inodes[2].flags)
+	}
 }
 
 // Ensure that a node can deserialize from a leaf page.
@@ -47,12 +53,18 @@ func TestNode_read_LeafPage(t *testing.T) {
 	n.read(page)
 
 	// Check that there are two inodes with correct data.
-	assert.True(t, n.isLeaf)
-	assert.Equal(t, len(n.inodes), 2)
-	assert.Equal(t, n.inodes[0].key, []byte("bar"))
-	assert.Equal(t, n.inodes[0].value, []byte("fooz"))
-	assert.Equal(t, n.inodes[1].key, []byte("helloworld"))
-	assert.Equal(t, n.inodes[1].value, []byte("bye"))
+	if !n.isLeaf {
+		t.Fatalf("expected leaf; got isLeaf=%v", n.isLeaf)
+	}
+	if len(n.inodes) != 2 {
+		t.Fatalf("exp=2; got=%d", len(n.inodes))
+	}
+	if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" {
+		t.Fatalf("exp=<bar,fooz>; got=<%s,%s>", k, v)
+	}
+	if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" {
+		t.Fatalf("exp=<helloworld,bye>; got=<%s,%s>", k, v)
+	}
 }
 
 // Ensure that a node can serialize into a leaf page.
@@ -73,13 +85,18 @@ func TestNode_write_LeafPage(t *testing.T) {
 	n2.read(p)
 
 	// Check that the two pages are the same.
-	assert.Equal(t, len(n2.inodes), 3)
-	assert.Equal(t, n2.inodes[0].key, []byte("john"))
-	assert.Equal(t, n2.inodes[0].value, []byte("johnson"))
-	assert.Equal(t, n2.inodes[1].key, []byte("ricki"))
-	assert.Equal(t, n2.inodes[1].value, []byte("lake"))
-	assert.Equal(t, n2.inodes[2].key, []byte("susy"))
-	assert.Equal(t, n2.inodes[2].value, []byte("que"))
+	if len(n2.inodes) != 3 {
+		t.Fatalf("exp=3; got=%d", len(n2.inodes))
+	}
+	if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" {
+		t.Fatalf("exp=<john,johnson>; got=<%s,%s>", k, v)
+	}
+	if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" {
+		t.Fatalf("exp=<ricki,lake>; got=<%s,%s>", k, v)
+	}
+	if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" {
+		t.Fatalf("exp=<susy,que>; got=<%s,%s>", k, v)
+	}
 }
 
 // Ensure that a node can split into appropriate subgroups.
@@ -96,9 +113,15 @@ func TestNode_split(t *testing.T) {
 	n.split(100)
 
 	var parent = n.parent
-	assert.Equal(t, len(parent.children), 2)
-	assert.Equal(t, len(parent.children[0].inodes), 2)
-	assert.Equal(t, len(parent.children[1].inodes), 3)
+	if len(parent.children) != 2 {
+		t.Fatalf("exp=2; got=%d", len(parent.children))
+	}
+	if len(parent.children[0].inodes) != 2 {
+		t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes))
+	}
+	if len(parent.children[1].inodes) != 3 {
+		t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes))
+	}
 }
 
 // Ensure that a page with the minimum number of inodes just returns a single node.
@@ -110,7 +133,9 @@ func TestNode_split_MinKeys(t *testing.T) {
 
 	// Split.
 	n.split(20)
-	assert.Nil(t, n.parent)
+	if n.parent != nil {
+		t.Fatalf("expected nil parent")
+	}
 }
 
 // Ensure that a node that has keys that all fit on a page just returns one leaf.
@@ -125,5 +150,7 @@ func TestNode_split_SinglePage(t *testing.T) {
 
 	// Split.
 	n.split(4096)
-	assert.Nil(t, n.parent)
+	if n.parent != nil {
+		t.Fatalf("expected nil parent")
+	}
 }
diff --git a/page_test.go b/page_test.go
index be90096..7a4d327 100644
--- a/page_test.go
+++ b/page_test.go
@@ -1,17 +1,26 @@
 package bolt
 
 import (
-	"github.com/stretchr/testify/assert"
 	"testing"
 )
 
 // Ensure that the page type can be returned in human readable format.
 func TestPage_typ(t *testing.T) {
-	assert.Equal(t, (&page{flags: branchPageFlag}).typ(), "branch")
-	assert.Equal(t, (&page{flags: leafPageFlag}).typ(), "leaf")
-	assert.Equal(t, (&page{flags: metaPageFlag}).typ(), "meta")
-	assert.Equal(t, (&page{flags: freelistPageFlag}).typ(), "freelist")
-	assert.Equal(t, (&page{flags: 20000}).typ(), "unknown<4e20>")
+	if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" {
+		t.Fatalf("exp=branch; got=%v", typ)
+	}
+	if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" {
+		t.Fatalf("exp=leaf; got=%v", typ)
+	}
+	if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" {
+		t.Fatalf("exp=meta; got=%v", typ)
+	}
+	if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" {
+		t.Fatalf("exp=freelist; got=%v", typ)
+	}
+	if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" {
+		t.Fatalf("exp=unknown<4e20>; got=%v", typ)
+	}
 }
 
 // Ensure that the hexdump debugging function doesn't blow up.
diff --git a/simulation_test.go b/simulation_test.go
index 1c2aaf8..ceb8bae 100644
--- a/simulation_test.go
+++ b/simulation_test.go
@@ -8,7 +8,6 @@ import (
 	"testing"
 
 	"github.com/boltdb/bolt"
-	"github.com/stretchr/testify/assert"
 )
 
 func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) }
@@ -90,7 +89,7 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
 				versions[tx.ID()] = qdb
 				mutex.Unlock()
 
-				assert.NoError(t, tx.Commit())
+				ok(t, tx.Commit())
 			}()
 		} else {
 			defer tx.Rollback()
diff --git a/tx_test.go b/tx_test.go
index 1046842..39f50c4 100644
--- a/tx_test.go
+++ b/tx_test.go
@@ -7,7 +7,6 @@ import (
 	"testing"
 
 	"github.com/boltdb/bolt"
-	"github.com/stretchr/testify/assert"
 )
 
 // Ensure that committing a closed transaction returns an error.
@@ -16,8 +15,8 @@ func TestTx_Commit_Closed(t *testing.T) {
 	defer db.Close()
 	tx, _ := db.Begin(true)
 	tx.CreateBucket([]byte("foo"))
-	assert.NoError(t, tx.Commit())
-	assert.Equal(t, tx.Commit(), bolt.ErrTxClosed)
+	ok(t, tx.Commit())
+	equals(t, tx.Commit(), bolt.ErrTxClosed)
 }
 
 // Ensure that rolling back a closed transaction returns an error.
@@ -25,8 +24,8 @@ func TestTx_Rollback_Closed(t *testing.T) {
 	db := NewTestDB()
 	defer db.Close()
 	tx, _ := db.Begin(true)
-	assert.NoError(t, tx.Rollback())
-	assert.Equal(t, tx.Rollback(), bolt.ErrTxClosed)
+	ok(t, tx.Rollback())
+	equals(t, tx.Rollback(), bolt.ErrTxClosed)
 }
 
 // Ensure that committing a read-only transaction returns an error.
@@ -34,7 +33,7 @@ func TestTx_Commit_ReadOnly(t *testing.T) {
 	db := NewTestDB()
 	defer db.Close()
 	tx, _ := db.Begin(false)
-	assert.Equal(t, tx.Commit(), bolt.ErrTxNotWritable)
+	equals(t, tx.Commit(), bolt.ErrTxNotWritable)
 }
 
 // Ensure that a transaction can retrieve a cursor on the root bucket.
@@ -47,16 +46,16 @@ func TestTx_Cursor(t *testing.T) {
 		c := tx.Cursor()
 
 		k, v := c.First()
-		assert.Equal(t, "widgets", string(k))
-		assert.Nil(t, v)
+		equals(t, "widgets", string(k))
+		assert(t, v == nil, "")
 
 		k, v = c.Next()
-		assert.Equal(t, "woojits", string(k))
-		assert.Nil(t, v)
+		equals(t, "woojits", string(k))
+		assert(t, v == nil, "")
 
 		k, v = c.Next()
-		assert.Nil(t, k)
-		assert.Nil(t, v)
+		assert(t, k == nil, "")
+		assert(t, v == nil, "")
 
 		return nil
 	})
@@ -68,8 +67,8 @@ func TestTx_CreateBucket_ReadOnly(t *testing.T) {
 	defer db.Close()
 	db.View(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("foo"))
-		assert.Nil(t, b)
-		assert.Equal(t, bolt.ErrTxNotWritable, err)
+		assert(t, b == nil, "")
+		equals(t, bolt.ErrTxNotWritable, err)
 		return nil
 	})
 }
@@ -81,8 +80,8 @@ func TestTx_CreateBucket_Closed(t *testing.T) {
 	tx, _ := db.Begin(true)
 	tx.Commit()
 	b, err := tx.CreateBucket([]byte("foo"))
-	assert.Nil(t, b)
-	assert.Equal(t, bolt.ErrTxClosed, err)
+	assert(t, b == nil, "")
+	equals(t, bolt.ErrTxClosed, err)
 }
 
 // Ensure that a Tx can retrieve a bucket.
@@ -92,7 +91,7 @@ func TestTx_Bucket(t *testing.T) {
 	db.Update(func(tx *bolt.Tx) error {
 		tx.CreateBucket([]byte("widgets"))
 		b := tx.Bucket([]byte("widgets"))
-		assert.NotNil(t, b)
+		assert(t, b != nil, "")
 		return nil
 	})
 }
@@ -105,7 +104,7 @@ func TestTx_Get_Missing(t *testing.T) {
 		tx.CreateBucket([]byte("widgets"))
 		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
 		value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
-		assert.Nil(t, value)
+		assert(t, value == nil, "")
 		return nil
 	})
 }
@@ -118,15 +117,15 @@ func TestTx_CreateBucket(t *testing.T) {
 	// Create a bucket.
 	db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
-		assert.NotNil(t, b)
-		assert.NoError(t, err)
+		assert(t, b != nil, "")
+		ok(t, err)
 		return nil
 	})
 
 	// Read the bucket through a separate transaction.
 	db.View(func(tx *bolt.Tx) error {
 		b := tx.Bucket([]byte("widgets"))
-		assert.NotNil(t, b)
+		assert(t, b != nil, "")
 		return nil
 	})
 }
@@ -137,27 +136,27 @@ func TestTx_CreateBucketIfNotExists(t *testing.T) {
 	defer db.Close()
 	db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
-		assert.NotNil(t, b)
-		assert.NoError(t, err)
+		assert(t, b != nil, "")
+		ok(t, err)
 
 		b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
-		assert.NotNil(t, b)
-		assert.NoError(t, err)
+		assert(t, b != nil, "")
+		ok(t, err)
 
 		b, err = tx.CreateBucketIfNotExists([]byte{})
-		assert.Nil(t, b)
-		assert.Equal(t, bolt.ErrBucketNameRequired, err)
+		assert(t, b == nil, "")
+		equals(t, bolt.ErrBucketNameRequired, err)
 
 		b, err = tx.CreateBucketIfNotExists(nil)
-		assert.Nil(t, b)
-		assert.Equal(t, bolt.ErrBucketNameRequired, err)
+		assert(t, b == nil, "")
+		equals(t, bolt.ErrBucketNameRequired, err)
 		return nil
 	})
 
 	// Read the bucket through a separate transaction.
 	db.View(func(tx *bolt.Tx) error {
 		b := tx.Bucket([]byte("widgets"))
-		assert.NotNil(t, b)
+		assert(t, b != nil, "")
 		return nil
 	})
 }
@@ -169,16 +168,16 @@ func TestTx_CreateBucket_Exists(t *testing.T) {
 	// Create a bucket.
 	db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
-		assert.NotNil(t, b)
-		assert.NoError(t, err)
+		assert(t, b != nil, "")
+		ok(t, err)
 		return nil
 	})
 
 	// Create the same bucket again.
 	db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket([]byte("widgets"))
-		assert.Nil(t, b)
-		assert.Equal(t, bolt.ErrBucketExists, err)
+		assert(t, b == nil, "")
+		equals(t, bolt.ErrBucketExists, err)
 		return nil
 	})
 }
@@ -189,8 +188,8 @@ func TestTx_CreateBucket_NameRequired(t *testing.T) {
 	defer db.Close()
 	db.Update(func(tx *bolt.Tx) error {
 		b, err := tx.CreateBucket(nil)
-		assert.Nil(t, b)
-		assert.Equal(t, bolt.ErrBucketNameRequired, err)
+		assert(t, b == nil, "")
+		equals(t, bolt.ErrBucketNameRequired, err)
 		return nil
 	})
 }
@@ -209,17 +208,17 @@ func TestTx_DeleteBucket(t *testing.T) {
 
 	// Delete the bucket and make sure we can't get the value.
 	db.Update(func(tx *bolt.Tx) error {
-		assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
-		assert.Nil(t, tx.Bucket([]byte("widgets")))
+		ok(t, tx.DeleteBucket([]byte("widgets")))
+		assert(t, tx.Bucket([]byte("widgets")) == nil, "")
 		return nil
 	})
 
 	db.Update(func(tx *bolt.Tx) error {
 		// Create the bucket again and make sure there's not a phantom value.
 		b, err := tx.CreateBucket([]byte("widgets"))
-		assert.NotNil(t, b)
-		assert.NoError(t, err)
-		assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+		assert(t, b != nil, "")
+		ok(t, err)
+		assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "")
 		return nil
 	})
 }
@@ -230,7 +229,7 @@ func TestTx_DeleteBucket_Closed(t *testing.T) {
 	defer db.Close()
 	tx, _ := db.Begin(true)
 	tx.Commit()
-	assert.Equal(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed)
+	equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed)
 }
 
 // Ensure that deleting a bucket with a read-only transaction returns an error.
@@ -238,7 +237,7 @@ func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
 	db := NewTestDB()
 	defer db.Close()
 	db.View(func(tx *bolt.Tx) error {
-		assert.Equal(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable)
+		equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable)
 		return nil
 	})
 }
@@ -248,7 +247,7 @@ func TestTx_DeleteBucket_NotFound(t *testing.T) {
 	db := NewTestDB()
 	defer db.Close()
 	db.Update(func(tx *bolt.Tx) error {
-		assert.Equal(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
+		equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
 		return nil
 	})
 }
@@ -264,7 +263,7 @@ func TestTx_OnCommit(t *testing.T) {
 		_, err := tx.CreateBucket([]byte("widgets"))
 		return err
 	})
-	assert.Equal(t, 3, x)
+	equals(t, 3, x)
 }
 
 // Ensure that Tx commit handlers are NOT called after a transaction rolls back.
@@ -278,7 +277,7 @@ func TestTx_OnCommit_Rollback(t *testing.T) {
 		tx.CreateBucket([]byte("widgets"))
 		return errors.New("rollback this commit")
 	})
-	assert.Equal(t, 0, x)
+	equals(t, 0, x)
 }
 
 // Ensure that the database can be copied to a file path.
@@ -293,15 +292,15 @@ func TestTx_CopyFile(t *testing.T) {
 		return nil
 	})
 
-	assert.NoError(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) }))
+	ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) }))
 
 	db2, err := bolt.Open(dest, 0600, nil)
-	assert.NoError(t, err)
+	ok(t, err)
 	defer db2.Close()
 
 	db2.View(func(tx *bolt.Tx) error {
-		assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
-		assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
+		equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+		equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
 		return nil
 	})
 }
@@ -339,7 +338,7 @@ func TestTx_CopyFile_Error_Meta(t *testing.T) {
 	})
 
 	err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) })
-	assert.EqualError(t, err, "meta copy: error injected for tests")
+	equals(t, err.Error(), "meta copy: error injected for tests")
 }
 
 // Ensure that Copy handles write errors right.
@@ -354,7 +353,7 @@ func TestTx_CopyFile_Error_Normal(t *testing.T) {
 	})
 
 	err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) })
-	assert.EqualError(t, err, "error injected for tests")
+	equals(t, err.Error(), "error injected for tests")
 }
 
 func ExampleTx_Rollback() {