diff --git a/Makefile b/Makefile index cfbed51..e035e63 100644 --- a/Makefile +++ b/Makefile @@ -1,54 +1,18 @@ -TEST=. -BENCH=. -COVERPROFILE=/tmp/c.out BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" default: build -bench: - go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) - -# http://cloc.sourceforge.net/ -cloc: - @cloc --not-match-f='Makefile|_test.go' . - -cover: fmt - go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) . - go tool cover -html=$(COVERPROFILE) - rm $(COVERPROFILE) - -cpuprofile: fmt - @go test -c - @./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" # go get github.com/kisielk/errcheck errcheck: - @echo "=== errcheck ===" - @errcheck github.com/boltdb/bolt + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt -fmt: - @go fmt ./... +test: + @go test -v -cover . + @go test -v ./cmd/bolt -get: - @go get -d ./... - -build: get - @mkdir -p bin - @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt - -test: fmt - @go get github.com/stretchr/testify/assert - @echo "=== TESTS ===" - @go test -v -cover -test.run=$(TEST) - @echo "" - @echo "" - @echo "=== CLI ===" - @go test -v -test.run=$(TEST) ./cmd/bolt - @echo "" - @echo "" - @echo "=== RACE DETECTOR ===" - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -.PHONY: bench cloc cover cpuprofile fmt memprofile test +.PHONY: fmt test diff --git a/batch.go b/batch.go deleted file mode 100644 index 84acae6..0000000 --- a/batch.go +++ /dev/null @@ -1,138 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// Batch calls fn as part of a batch. It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. 
the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. - db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. 
- if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. - c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - if c.err != nil { - c.err <- err - } - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. 
-var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} diff --git a/batch_benchmark_test.go b/batch_benchmark_test.go deleted file mode 100644 index b745a37..0000000 --- a/batch_benchmark_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "errors" - "hash/fnv" - "sync" - "testing" - - "github.com/boltdb/bolt" -) - -func validateBatchBench(b *testing.B, db *TestDB) { - var rollback = errors.New("sentinel error to cause rollback") - validate := func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte("bench")) - h := fnv.New32a() - buf := make([]byte, 4) - for id := uint32(0); id < 1000; id++ { - binary.LittleEndian.PutUint32(buf, id) - h.Reset() - h.Write(buf[:]) - k := h.Sum(nil) - v := bucket.Get(k) - if v == nil { - b.Errorf("not found id=%d key=%x", id, k) - continue - } - if g, e := v, []byte("filler"); !bytes.Equal(g, e) { - b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e) - } - if err := bucket.Delete(k); err != nil { - return err - } - } - // should be empty now - c := bucket.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - b.Errorf("unexpected key: %x = %q", k, v) - } - return rollback - } - if err := db.Update(validate); err != nil && err != rollback { - b.Error(err) - } -} - -func BenchmarkDBBatchAutomatic(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - - go func(id uint32) { - defer 
wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Batch(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchSingle(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Update(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchManual10x100(b *testing.B) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("bench")) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for major := 0; major < 10; major++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - insert100 := func(tx *bolt.Tx) error { - h := fnv.New32a() - buf := make([]byte, 4) - for minor := uint32(0); minor < 100; minor++ { - binary.LittleEndian.PutUint32(buf, uint32(id*100+minor)) - h.Reset() - h.Write(buf[:]) - k := h.Sum(nil) - b := tx.Bucket([]byte("bench")) - if err := b.Put(k, []byte("filler")); err != nil { - return err - } - } - return nil - } - if err := db.Update(insert100); err != nil { - b.Fatal(err) - } - }(uint32(major)) - } - close(start) - wg.Wait() 
- } - - b.StopTimer() - validateBatchBench(b, db) -} diff --git a/batch_example_test.go b/batch_example_test.go deleted file mode 100644 index 74eff8a..0000000 --- a/batch_example_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package bolt_test - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "log" - "math/rand" - "net/http" - "net/http/httptest" - "os" - - "github.com/boltdb/bolt" -) - -// Set this to see how the counts are actually updated. -const verbose = false - -// Counter updates a counter in Bolt for every URL path requested. -type counter struct { - db *bolt.DB -} - -func (c counter) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - // Communicates the new count from a successful database - // transaction. - var result uint64 - - increment := func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("hits")) - if err != nil { - return err - } - key := []byte(req.URL.String()) - // Decode handles key not found for us. - count := decode(b.Get(key)) + 1 - b.Put(key, encode(count)) - // All good, communicate new count. - result = count - return nil - } - if err := c.db.Batch(increment); err != nil { - http.Error(rw, err.Error(), 500) - return - } - - if verbose { - log.Printf("server: %s: %d", req.URL.String(), result) - } - - rw.Header().Set("Content-Type", "application/octet-stream") - fmt.Fprintf(rw, "%d\n", result) -} - -func client(id int, base string, paths []string) error { - // Process paths in random order. - rng := rand.New(rand.NewSource(int64(id))) - permutation := rng.Perm(len(paths)) - - for i := range paths { - path := paths[permutation[i]] - resp, err := http.Get(base + path) - if err != nil { - return err - } - defer resp.Body.Close() - buf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - if verbose { - log.Printf("client: %s: %s", path, buf) - } - } - return nil -} - -func ExampleDB_Batch() { - // Open the database. 
- db, _ := bolt.Open(tempfile(), 0666, nil) - defer os.Remove(db.Path()) - defer db.Close() - - // Start our web server - count := counter{db} - srv := httptest.NewServer(count) - defer srv.Close() - - // Decrease the batch size to make things more interesting. - db.MaxBatchSize = 3 - - // Get every path multiple times concurrently. - const clients = 10 - paths := []string{ - "/foo", - "/bar", - "/baz", - "/quux", - "/thud", - "/xyzzy", - } - errors := make(chan error, clients) - for i := 0; i < clients; i++ { - go func(id int) { - errors <- client(id, srv.URL, paths) - }(i) - } - // Check all responses to make sure there's no error. - for i := 0; i < clients; i++ { - if err := <-errors; err != nil { - fmt.Printf("client error: %v", err) - return - } - } - - // Check the final result - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("hits")) - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("hits to %s: %d\n", k, decode(v)) - } - return nil - }) - - // Output: - // hits to /bar: 10 - // hits to /baz: 10 - // hits to /foo: 10 - // hits to /quux: 10 - // hits to /thud: 10 - // hits to /xyzzy: 10 -} - -// encode marshals a counter. -func encode(n uint64) []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, n) - return buf -} - -// decode unmarshals a counter. Nil buffers are decoded as 0. -func decode(buf []byte) uint64 { - if buf == nil { - return 0 - } - return binary.BigEndian.Uint64(buf) -} diff --git a/batch_test.go b/batch_test.go deleted file mode 100644 index 0b5075f..0000000 --- a/batch_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package bolt_test - -import ( - "testing" - "time" - - "github.com/boltdb/bolt" -) - -// Ensure two functions can perform updates in a single batch. -func TestDB_Batch(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - // Iterate over multiple updates in separate goroutines. 
- n := 2 - ch := make(chan error) - for i := 0; i < n; i++ { - go func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - }(i) - } - - // Check all responses to make sure there's no error. - for i := 0; i < n; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < n; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} - -func TestDB_Batch_Panic(t *testing.T) { - db := NewTestDB() - defer db.Close() - - var sentinel int - var bork = &sentinel - var problem interface{} - var err error - - // Execute a function inside a batch that panics. - func() { - defer func() { - if p := recover(); p != nil { - problem = p - } - }() - err = db.Batch(func(tx *bolt.Tx) error { - panic(bork) - }) - }() - - // Verify there is no error. - if g, e := err, error(nil); g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } - // Verify the panic was captured. - if g, e := problem, bork; g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } -} - -func TestDB_BatchFull(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - const size = 3 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = size - // high enough to never trigger here - db.MaxBatchDelay = 1 * time.Hour - - go put(1) - go put(2) - - // Give the batch a chance to exhibit bugs. - time.Sleep(10 * time.Millisecond) - - // not triggered yet - select { - case <-ch: - t.Fatalf("batch triggered too early") - default: - } - - go put(3) - - // Check all responses to make sure there's no error. 
- for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} - -func TestDB_BatchTime(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.MustCreateBucket([]byte("widgets")) - - const size = 1 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = 1000 - db.MaxBatchDelay = 0 - - go put(1) - - // Batch must trigger by time alone. - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - db.MustView(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }) -} diff --git a/bolt_test.go b/bolt_test.go deleted file mode 100644 index b7bea1f..0000000 --- a/bolt_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package bolt_test - -import ( - "fmt" - "path/filepath" - "reflect" - "runtime" - "testing" -) - -// assert fails the test if the condition is false. -func assert(tb testing.TB, condition bool, msg string, v ...interface{}) { - if !condition { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: "+msg+"\033[39m\n\n", append([]interface{}{filepath.Base(file), line}, v...)...) - tb.FailNow() - } -} - -// ok fails the test if an err is not nil. 
-func ok(tb testing.TB, err error) { - if err != nil { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) - tb.FailNow() - } -} - -// equals fails the test if exp is not equal to act. -func equals(tb testing.TB, exp, act interface{}) { - if !reflect.DeepEqual(exp, act) { - _, file, line, _ := runtime.Caller(1) - fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) - tb.FailNow() - } -} diff --git a/bucket_test.go b/bucket_test.go index 57b1d12..480ed08 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -5,6 +5,7 @@ import ( "encoding/binary" "errors" "fmt" + "log" "math/rand" "os" "strconv" @@ -17,94 +18,150 @@ import ( // Ensure that a bucket that gets a non-existent key returns nil. func TestBucket_Get_NonExistent(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert(t, value == nil, "") + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); v != nil { + t.Fatal("expected nil value") + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket can read a value that is not flushed yet. 
func TestBucket_Get_FromNode(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("bar")) - value := b.Get([]byte("foo")) - equals(t, []byte("bar"), value) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { + t.Fatalf("unexpected value: %v", v) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket retrieved via Get() returns a nil. func TestBucket_Get_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + + if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil { + t.Fatal("expected nil value") + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket can write a key/value. 
func TestBucket_Put(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - ok(t, err) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - equals(t, value, []byte("bar")) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + + v := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + if !bytes.Equal([]byte("bar"), v) { + t.Fatalf("unexpected value: %v", v) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket can rewrite a key in the same transaction. func TestBucket_Put_Repeat(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - ok(t, b.Put([]byte("foo"), []byte("bar"))) - ok(t, b.Put([]byte("foo"), []byte("baz"))) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("baz")); err != nil { + t.Fatal(err) + } + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - equals(t, value, []byte("baz")) + if !bytes.Equal([]byte("baz"), value) { + t.Fatalf("unexpected value: %v", value) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket can write a bunch of large values. 
func TestBucket_Put_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() count, factor := 100, 200 - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } for i := 1; i < count; i++ { - ok(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor)))) + if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil { + t.Fatal(err) + } } return nil - }) - db.View(func(tx *bolt.Tx) error { + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for i := 1; i < count; i++ { value := b.Get([]byte(strings.Repeat("0", i*factor))) - equals(t, []byte(strings.Repeat("X", (count-i)*factor)), value) + if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) { + t.Fatalf("unexpected value: %v", value) + } } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a database can perform multiple large appends safely. 
@@ -116,104 +173,170 @@ func TestDB_Put_VeryLarge(t *testing.T) { n, batchN := 400000, 200000 ksize, vsize := 8, 500 - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() for i := 0; i < n; i += batchN { - err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("widgets")) + if err != nil { + t.Fatal(err) + } for j := 0; j < batchN; j++ { k, v := make([]byte, ksize), make([]byte, vsize) binary.BigEndian.PutUint32(k, uint32(i+j)) - ok(t, b.Put(k, v)) + if err := b.Put(k, v); err != nil { + t.Fatal(err) + } } return nil - }) - ok(t, err) + }); err != nil { + t.Fatal(err) + } } } // Ensure that a setting a value on a key with a bucket value returns an error. func TestBucket_Put_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b0, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if err := b0.Put([]byte("foo"), []byte("bar")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a setting a value while the transaction is closed returns an error. 
func TestBucket_Put_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - equals(t, bolt.ErrTxClosed, b.Put([]byte("foo"), []byte("bar"))) + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + + if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that setting a value on a read-only bucket returns an error. func TestBucket_Put_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } return nil - }) - db.View(func(tx *bolt.Tx) error { + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - err := b.Put([]byte("foo"), []byte("bar")) - equals(t, err, bolt.ErrTxNotWritable) + if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket can delete an existing key. 
func TestBucket_Delete(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")) - ok(t, err) - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - assert(t, value == nil, "") + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); v != nil { + t.Fatalf("unexpected value: %v", v) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that deleting a large set of keys will work correctly. func TestBucket_Delete_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - var b, _ = tx.CreateBucket([]byte("widgets")) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 100; i++ { - ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)))) + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil { + t.Fatal(err) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < 100; i++ { + if err := b.Delete([]byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } } return nil - }) - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) for i := 0; i < 100; i++ { - ok(t, 
b.Delete([]byte(strconv.Itoa(i)))) + if v := b.Get([]byte(strconv.Itoa(i))); v != nil { + t.Fatalf("unexpected value: %v, i=%d", v, i) + } } return nil - }) - db.View(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - assert(t, b.Get([]byte(strconv.Itoa(i))) == nil, "") - } - return nil - }) + }); err != nil { + t.Fatal(err) + } } // Deleting a very large list of keys will cause the freelist to use overflow. @@ -222,11 +345,12 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) { t.Skip("skipping test in short mode.") } - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() + k := make([]byte, 16) for i := uint64(0); i < 10000; i++ { - err := db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists([]byte("0")) if err != nil { t.Fatalf("bucket error: %s", err) @@ -241,272 +365,450 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) { } return nil - }) - - if err != nil { - t.Fatalf("update error: %s", err) + }); err != nil { + t.Fatal(err) } } // Delete all of them in one large transaction - err := db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("0")) c := b.Cursor() for k, _ := c.First(); k != nil; k, _ = c.Next() { - c.Delete() + if err := c.Delete(); err != nil { + t.Fatal(err) + } } return nil - }) - - // Check that a freelist overflow occurred. - ok(t, err) + }); err != nil { + t.Fatal(err) + } } // Ensure that accessing and updating nested buckets is ok across transactions. func TestBucket_Nested(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { // Create a widgets bucket. b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) + if err != nil { + t.Fatal(err) + } // Create a widgets/foo bucket. 
_, err = b.CreateBucket([]byte("foo")) - ok(t, err) + if err != nil { + t.Fatal(err) + } // Create a widgets/bar key. - ok(t, b.Put([]byte("bar"), []byte("0000"))) + if err := b.Put([]byte("bar"), []byte("0000")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } db.MustCheck() // Update widgets/bar. - db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - ok(t, b.Put([]byte("bar"), []byte("xxxx"))) + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } db.MustCheck() // Cause a split. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) for i := 0; i < 10000; i++ { - ok(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))) + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } } return nil - }) + }); err != nil { + t.Fatal(err) + } db.MustCheck() // Insert into widgets/foo/baz. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) - ok(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy"))) + if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } db.MustCheck() // Verify. 
- db.View(func(tx *bolt.Tx) error { + if err := db.View(func(tx *bolt.Tx) error { var b = tx.Bucket([]byte("widgets")) - equals(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz"))) - equals(t, []byte("xxxx"), b.Get([]byte("bar"))) + if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) { + t.Fatalf("unexpected value: %v") + } + if !bytes.Equal(b.Get([]byte("bar")), []byte("xxxx")) { + t.Fatalf("unexpected value: %v") + } for i := 0; i < 10000; i++ { - equals(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i)))) + if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) { + t.Fatalf("unexpected value: %v", v) + } } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that deleting a bucket using Delete() returns an error. func TestBucket_Delete_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - _, err := b.CreateBucket([]byte("foo")) - ok(t, err) - equals(t, bolt.ErrIncompatibleValue, b.Delete([]byte("foo"))) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that deleting a key on a read-only bucket returns an error. 
func TestBucket_Delete_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } return nil - }) - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - err := b.Delete([]byte("foo")) - equals(t, err, bolt.ErrTxNotWritable) + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a deleting value while the transaction is closed returns an error. func TestBucket_Delete_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - equals(t, bolt.ErrTxClosed, b.Delete([]byte("foo"))) + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that deleting a bucket causes nested buckets to be deleted. 
func TestBucket_DeleteBucket_Nested(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) - ok(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + foo, err := widgets.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + bar, err := foo.CreateBucket([]byte("bar")) + if err != nil { + t.Fatal(err) + } + if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed. 
func TestBucket_DeleteBucket_Nested2(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - _, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat"))) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + foo, err := widgets.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + + bar, err := foo.CreateBucket([]byte("bar")) + if err != nil { + t.Fatal(err) + } + + if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } return nil - }) - db.Update(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") - assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) != nil, "") - assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")) != nil, "") - equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz"))) - ok(t, tx.DeleteBucket([]byte("widgets"))) + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + widgets := tx.Bucket([]byte("widgets")) + if widgets == nil { + t.Fatal("expected widgets bucket") + } + + foo := widgets.Bucket([]byte("foo")) + if foo == nil { + t.Fatal("expected foo bucket") + } + + bar := foo.Bucket([]byte("bar")) + if bar == nil { + t.Fatal("expected bar bucket") + } + + if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { + t.Fatalf("unexpected value: %v", v) + } + if err := tx.DeleteBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } return nil - }) - db.View(func(tx *bolt.Tx) error { - assert(t, 
tx.Bucket([]byte("widgets")) == nil, "") + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) != nil { + t.Fatal("expected bucket to be deleted") + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that deleting a child bucket with multiple pages causes all pages to get collected. +// NOTE: Consistency check in bolt_test.DB.Close() will panic if pages not freed properly. func TestBucket_DeleteBucket_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - ok(t, err) - b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + foo, err := widgets.CreateBucket([]byte("foo")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 1000; i++ { - ok(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)))) + if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil { + t.Fatal(err) + } } return nil - }) - db.Update(func(tx *bolt.Tx) error { - ok(t, tx.DeleteBucket([]byte("widgets"))) - return nil - }) + }); err != nil { + t.Fatal(err) + } - // NOTE: Consistency check in TestDB.Close() will panic if pages not freed properly. + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } } // Ensure that a simple value retrieved via Bucket() returns a nil. 
func TestBucket_Bucket_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - assert(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) == nil, "") + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); b != nil { + t.Fatal("expected nil bucket") + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that creating a bucket on an existing non-bucket key returns an error. func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - _, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")) - equals(t, bolt.ErrIncompatibleValue, err) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if _, err := widgets.CreateBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that deleting a bucket on an existing non-bucket key returns an error. 
func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) - equals(t, bolt.ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket can return an autoincrementing sequence. func TestBucket_NextSequence(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.CreateBucket([]byte("woojits")) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + widgets, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + woojits, err := tx.CreateBucket([]byte("woojits")) + if err != nil { + t.Fatal(err) + } // Make sure sequence increments. - seq, err := tx.Bucket([]byte("widgets")).NextSequence() - ok(t, err) - equals(t, seq, uint64(1)) - seq, err = tx.Bucket([]byte("widgets")).NextSequence() - ok(t, err) - equals(t, seq, uint64(2)) + if seq, err := widgets.NextSequence(); err != nil { + t.Fatal(err) + } else if seq != 1 { + t.Fatalf("unexpected sequence: %d", seq) + } + + if seq, err := widgets.NextSequence(); err != nil { + t.Fatal(err) + } else if seq != 2 { + t.Fatalf("unexpected sequence: %d", seq) + } // Buckets should be separate. 
- seq, err = tx.Bucket([]byte("woojits")).NextSequence() - ok(t, err) - equals(t, seq, uint64(1)) + if seq, err := woojits.NextSequence(); err != nil { + t.Fatal(err) + } else if seq != 1 { + t.Fatalf("unexpected sequence: %d", seq) + } + return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket will persist an autoincrementing sequence even if its // the only thing updated on the bucket. // https://github.com/boltdb/bolt/issues/296 func TestBucket_NextSequence_Persist(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - _, _ = tx.CreateBucket([]byte("widgets")) - return nil - }) + db := MustOpenDB() + defer db.MustClose() - db.Update(func(tx *bolt.Tx) error { - _, _ = tx.Bucket([]byte("widgets")).NextSequence() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { seq, err := tx.Bucket([]byte("widgets")).NextSequence() if err != nil { t.Fatalf("unexpected error: %s", err) @@ -514,199 +816,326 @@ func TestBucket_NextSequence_Persist(t *testing.T) { t.Fatalf("unexpected sequence: %d", seq) } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that retrieving the next sequence on a read-only bucket returns an error. 
func TestBucket_NextSequence_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } return nil - }) - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - i, err := b.NextSequence() - equals(t, i, uint64(0)) - equals(t, err, bolt.ErrTxNotWritable) + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + _, err := tx.Bucket([]byte("widgets")).NextSequence() + if err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that retrieving the next sequence for a bucket on a closed database return an error. func TestBucket_NextSequence_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - _, err := b.NextSequence() - equals(t, bolt.ErrTxClosed, err) + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + if _, err := b.NextSequence(); err != bolt.ErrTxClosed { + t.Fatal(err) + } } // Ensure a user can loop over all key/value pairs in a bucket. 
func TestBucket_ForEach(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002")) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("0000")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("0001")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte("0002")); err != nil { + t.Fatal(err) + } var index int - err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + if err := b.ForEach(func(k, v []byte) error { switch index { case 0: - equals(t, k, []byte("bar")) - equals(t, v, []byte("0002")) + if !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0002")) { + t.Fatalf("unexpected value: %v", v) + } case 1: - equals(t, k, []byte("baz")) - equals(t, v, []byte("0001")) + if !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0001")) { + t.Fatalf("unexpected value: %v", v) + } case 2: - equals(t, k, []byte("foo")) - equals(t, v, []byte("0000")) + if !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0000")) { + t.Fatalf("unexpected value: %v", v) + } } index++ return nil - }) - ok(t, err) - equals(t, index, 3) + }); err != nil { + t.Fatal(err) + } + + if index != 3 { + t.Fatalf("unexpected index: %d", index) + } + return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure a database can stop iteration early. 
func TestBucket_ForEach_ShortCircuit(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000")) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte("0000")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("0000")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("0000")); err != nil { + t.Fatal(err) + } var index int - err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { index++ if bytes.Equal(k, []byte("baz")) { return errors.New("marker") } return nil - }) - equals(t, errors.New("marker"), err) - equals(t, 2, index) + }); err == nil || err.Error() != "marker" { + t.Fatalf("unexpected error: %s", err) + } + if index != 2 { + t.Fatalf("unexpected index: %d", index) + } + return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that looping over a bucket on a closed database returns an error. 
func TestBucket_ForEach_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - tx.Rollback() - err := b.ForEach(func(k, v []byte) error { return nil }) - equals(t, bolt.ErrTxClosed, err) + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + + if err := b.ForEach(func(k, v []byte) error { return nil }); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that an error is returned when inserting with an empty key. func TestBucket_Put_EmptyKey(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar")) - equals(t, err, bolt.ErrKeyRequired) - err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar")) - equals(t, err, bolt.ErrKeyRequired) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte(""), []byte("bar")); err != bolt.ErrKeyRequired { + t.Fatalf("unexpected error: %s", err) + } + if err := b.Put(nil, []byte("bar")); err != bolt.ErrKeyRequired { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that an error is returned when inserting with a key that's too large. 
func TestBucket_Put_KeyTooLarge(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar")) - equals(t, err, bolt.ErrKeyTooLarge) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put(make([]byte, 32769), []byte("bar")); err != bolt.ErrKeyTooLarge { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that an error is returned when inserting a value that's too large. func TestBucket_Put_ValueTooLarge(t *testing.T) { + // Skip this test on DroneCI because the machine is resource constrained. if os.Getenv("DRONE") == "true" { t.Skip("not enough RAM for test") } - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)) - equals(t, err, bolt.ErrValueTooLarge) + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != bolt.ErrValueTooLarge { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure a bucket can calculate stats. func TestBucket_Stats(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() // Add bucket with fewer keys but one big value. 
- big_key := []byte("really-big-value") + bigKey := []byte("really-big-value") for i := 0; i < 500; i++ { - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - return b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))) - }) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("woojits")) + if err != nil { + t.Fatal(err) + } + + if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + } + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", 10000))); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) } - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) - return b.Put(big_key, []byte(strings.Repeat("*", 10000))) - }) db.MustCheck() - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("woojits")) - stats := b.Stats() - equals(t, 1, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 7, stats.LeafPageN) - equals(t, 2, stats.LeafOverflowN) - equals(t, 501, stats.KeyN) - equals(t, 2, stats.Depth) + + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("woojits")).Stats() + if stats.BranchPageN != 1 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 7 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 2 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 501 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 2 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } branchInuse := 16 // branch page header branchInuse += 7 * 16 // branch elements 
branchInuse += 7 * 3 // branch keys (6 3-byte keys) - equals(t, branchInuse, stats.BranchInuse) + if stats.BranchInuse != branchInuse { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } leafInuse := 7 * 16 // leaf page header leafInuse += 501 * 16 // leaf elements - leafInuse += 500*3 + len(big_key) // leaf keys + leafInuse += 500*3 + len(bigKey) // leaf keys leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values - equals(t, leafInuse, stats.LeafInuse) - - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 4096, stats.BranchAlloc) - equals(t, 36864, stats.LeafAlloc) + if stats.LeafInuse != leafInuse { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) + } + + // Only check allocations for 4KB pages. + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 4096 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 36864 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 0 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 0 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) } - equals(t, 1, stats.BucketN) - equals(t, 0, stats.InlineBucketN) - equals(t, 0, stats.InlineBucketInuse) return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure a bucket with random insertion utilizes fill percentage correctly. @@ -717,150 +1146,251 @@ func TestBucket_Stats_RandomFill(t *testing.T) { t.Skip("invalid page size for test") } - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() // Add a set of values in random order. It will be the same random // order so we can maintain consistency between test runs. 
var count int - r := rand.New(rand.NewSource(42)) - for _, i := range r.Perm(1000) { - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("woojits")) + rand := rand.New(rand.NewSource(42)) + for _, i := range rand.Perm(1000) { + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("woojits")) + if err != nil { + t.Fatal(err) + } b.FillPercent = 0.9 - for _, j := range r.Perm(100) { + for _, j := range rand.Perm(100) { index := (j * 10000) + i - b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")) + if err := b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")); err != nil { + t.Fatal(err) + } count++ } return nil - }) + }); err != nil { + t.Fatal(err) + } } + db.MustCheck() - db.View(func(tx *bolt.Tx) error { - s := tx.Bucket([]byte("woojits")).Stats() - equals(t, 100000, s.KeyN) + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("woojits")).Stats() + if stats.KeyN != 100000 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } - equals(t, 98, s.BranchPageN) - equals(t, 0, s.BranchOverflowN) - equals(t, 130984, s.BranchInuse) - equals(t, 401408, s.BranchAlloc) + if stats.BranchPageN != 98 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.BranchInuse != 130984 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.BranchAlloc != 401408 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } - equals(t, 3412, s.LeafPageN) - equals(t, 0, s.LeafOverflowN) - equals(t, 4742482, s.LeafInuse) - equals(t, 13975552, s.LeafAlloc) + if stats.LeafPageN != 3412 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.LeafInuse != 4742482 { + t.Fatalf("unexpected LeafInuse: %d", 
stats.LeafInuse) + } else if stats.LeafAlloc != 13975552 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure a bucket can calculate stats. func TestBucket_Stats_Small(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { // Add a bucket that fits on a single root leaf. b, err := tx.CreateBucket([]byte("whozawhats")) - ok(t, err) - b.Put([]byte("foo"), []byte("bar")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } + db.MustCheck() - db.View(func(tx *bolt.Tx) error { + + if err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("whozawhats")) stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 0, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 1, stats.KeyN) - equals(t, 1, stats.Depth) - equals(t, 0, stats.BranchInuse) - equals(t, 0, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 0, stats.BranchAlloc) - equals(t, 0, stats.LeafAlloc) + if stats.BranchPageN != 0 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 0 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 1 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 1 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 0 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.LeafInuse != 0 { + t.Fatalf("unexpected LeafInuse: %d", 
stats.LeafInuse) } - equals(t, 1, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, 16+16+6, stats.InlineBucketInuse) + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 0 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 0 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 1 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 16+16+6 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + return nil - }) + }); err != nil { + t.Fatal(err) + } } func TestBucket_Stats_EmptyBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { // Add a bucket that fits on a single root leaf. - _, err := tx.CreateBucket([]byte("whozawhats")) - ok(t, err) + if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } + db.MustCheck() - db.View(func(tx *bolt.Tx) error { + + if err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("whozawhats")) stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 0, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 0, stats.KeyN) - equals(t, 1, stats.Depth) - equals(t, 0, stats.BranchInuse) - equals(t, 0, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 0, stats.BranchAlloc) - equals(t, 0, stats.LeafAlloc) + if stats.BranchPageN != 0 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 0 { + t.Fatalf("unexpected LeafPageN: %d", 
stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 0 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 1 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 0 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.LeafInuse != 0 { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) } - equals(t, 1, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, 16, stats.InlineBucketInuse) + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 0 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 0 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 1 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 16 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure a bucket can calculate stats. 
func TestBucket_Stats_Nested(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("foo")) - ok(t, err) + if err != nil { + t.Fatal(err) + } for i := 0; i < 100; i++ { - b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))) + if err := b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))); err != nil { + t.Fatal(err) + } } + bar, err := b.CreateBucket([]byte("bar")) - ok(t, err) - for i := 0; i < 10; i++ { - bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) + if err != nil { + t.Fatal(err) } + for i := 0; i < 10; i++ { + if err := bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + } + baz, err := bar.CreateBucket([]byte("baz")) - ok(t, err) - for i := 0; i < 10; i++ { - baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))) + if err != nil { + t.Fatal(err) } + for i := 0; i < 10; i++ { + if err := baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + t.Fatal(err) + } + } + return nil - }) + }); err != nil { + t.Fatal(err) + } db.MustCheck() - db.View(func(tx *bolt.Tx) error { + if err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("foo")) stats := b.Stats() - equals(t, 0, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 2, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 122, stats.KeyN) - equals(t, 3, stats.Depth) - equals(t, 0, stats.BranchInuse) + if stats.BranchPageN != 0 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 2 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 
122 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 3 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 0 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } foo := 16 // foo (pghdr) foo += 101 * 16 // foo leaf elements @@ -876,17 +1406,30 @@ func TestBucket_Stats_Nested(t *testing.T) { baz += 10 * 16 // baz leaf elements baz += 10 + 10 // baz leaf key/values - equals(t, foo+bar+baz, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 0, stats.BranchAlloc) - equals(t, 8192, stats.LeafAlloc) + if stats.LeafInuse != foo+bar+baz { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) } - equals(t, 3, stats.BucketN) - equals(t, 1, stats.InlineBucketN) - equals(t, baz, stats.InlineBucketInuse) + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 0 { + t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) + } else if stats.LeafAlloc != 8192 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 3 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 1 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != baz { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure a large bucket can calculate stats. @@ -895,44 +1438,71 @@ func TestBucket_Stats_Large(t *testing.T) { t.Skip("skipping test in short mode.") } - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() var index int for i := 0; i < 100; i++ { - db.Update(func(tx *bolt.Tx) error { - // Add bucket with lots of keys. - b, _ := tx.CreateBucketIfNotExists([]byte("widgets")) + // Add bucket with lots of keys. 
+ if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("widgets")) + if err != nil { + t.Fatal(err) + } for i := 0; i < 1000; i++ { - b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))) + if err := b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))); err != nil { + t.Fatal(err) + } index++ } return nil - }) + }); err != nil { + t.Fatal(err) + } } + db.MustCheck() - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - stats := b.Stats() - equals(t, 13, stats.BranchPageN) - equals(t, 0, stats.BranchOverflowN) - equals(t, 1196, stats.LeafPageN) - equals(t, 0, stats.LeafOverflowN) - equals(t, 100000, stats.KeyN) - equals(t, 3, stats.Depth) - equals(t, 25257, stats.BranchInuse) - equals(t, 2596916, stats.LeafInuse) - if os.Getpagesize() == 4096 { - // Incompatible page size - equals(t, 53248, stats.BranchAlloc) - equals(t, 4898816, stats.LeafAlloc) + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("widgets")).Stats() + if stats.BranchPageN != 13 { + t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) + } else if stats.BranchOverflowN != 0 { + t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) + } else if stats.LeafPageN != 1196 { + t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) + } else if stats.LeafOverflowN != 0 { + t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) + } else if stats.KeyN != 100000 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } else if stats.Depth != 3 { + t.Fatalf("unexpected Depth: %d", stats.Depth) + } else if stats.BranchInuse != 25257 { + t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) + } else if stats.LeafInuse != 2596916 { + t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) } - equals(t, 1, stats.BucketN) - equals(t, 0, stats.InlineBucketN) - equals(t, 0, stats.InlineBucketInuse) + + if os.Getpagesize() == 4096 { + if stats.BranchAlloc != 53248 { + t.Fatalf("unexpected BranchAlloc: 
%d", stats.BranchAlloc) + } else if stats.LeafAlloc != 4898816 { + t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) + } + } + + if stats.BucketN != 1 { + t.Fatalf("unexpected BucketN: %d", stats.BucketN) + } else if stats.InlineBucketN != 0 { + t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) + } else if stats.InlineBucketInuse != 0 { + t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) + } + return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket can write random keys and values across multiple transactions. @@ -942,27 +1512,34 @@ func TestBucket_Put_Single(t *testing.T) { } index := 0 - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() + if err := quick.Check(func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() m := make(map[string][]byte) - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + for _, item := range items { - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { panic("put error: " + err.Error()) } m[string(item.Key)] = item.Value return nil - }) + }); err != nil { + t.Fatal(err) + } // Verify all key/values so far. 
- db.View(func(tx *bolt.Tx) error { + if err := db.View(func(tx *bolt.Tx) error { i := 0 for k, v := range m { value := tx.Bucket([]byte("widgets")).Get([]byte(k)) @@ -974,13 +1551,14 @@ func TestBucket_Put_Single(t *testing.T) { i++ } return nil - }) + }); err != nil { + t.Fatal(err) + } } index++ return true - } - if err := quick.Check(f, qconfig()); err != nil { + }, nil); err != nil { t.Error(err) } } @@ -991,25 +1569,34 @@ func TestBucket_Put_Multiple(t *testing.T) { t.Skip("skipping test in short mode.") } - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() + if err := quick.Check(func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + // Bulk insert all values. - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) } return nil - }) - ok(t, err) + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } // Verify all items exist. 
- db.View(func(tx *bolt.Tx) error { + if err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for _, item := range items { value := b.Get(item.Key) @@ -1019,10 +1606,12 @@ func TestBucket_Put_Multiple(t *testing.T) { } } return nil - }) + }); err != nil { + t.Fatal(err) + } + return true - } - if err := quick.Check(f, qconfig()); err != nil { + }, qconfig()); err != nil { t.Error(err) } } @@ -1033,68 +1622,98 @@ func TestBucket_Delete_Quick(t *testing.T) { t.Skip("skipping test in short mode.") } - f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() + if err := quick.Check(func(items testdata) bool { + db := MustOpenDB() + defer db.MustClose() + // Bulk insert all values. - db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) } return nil - }) - ok(t, err) + }); err != nil { + t.Fatal(err) + } + + if err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + return nil + }); err != nil { + t.Fatal(err) + } // Remove items one at a time and check consistency. for _, item := range items { - err := db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("widgets")).Delete(item.Key) - }) - ok(t, err) + }); err != nil { + t.Fatal(err) + } } // Anything before our deletion index should be nil. 
- db.View(func(tx *bolt.Tx) error { - tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { + if err := db.View(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) return nil - }) + }); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } + return true - } - if err := quick.Check(f, qconfig()); err != nil { + }, qconfig()); err != nil { t.Error(err) } } func ExampleBucket_Put() { // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } defer os.Remove(db.Path()) - defer db.Close() // Start a write transaction. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { // Create a bucket. - tx.CreateBucket([]byte("widgets")) + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } // Set the value "bar" for the key "foo". - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } return nil - }) + }); err != nil { + log.Fatal(err) + } // Read value back in a different read-only transaction. - db.View(func(tx *bolt.Tx) error { + if err := db.View(func(tx *bolt.Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) fmt.Printf("The value of 'foo' is: %s\n", value) return nil - }) + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } // Output: // The value of 'foo' is: bar @@ -1102,38 +1721,56 @@ func ExampleBucket_Put() { func ExampleBucket_Delete() { // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } defer os.Remove(db.Path()) - defer db.Close() // Start a write transaction. 
- db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { // Create a bucket. - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } // Set the value "bar" for the key "foo". - b.Put([]byte("foo"), []byte("bar")) + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } // Retrieve the key back from the database and verify it. value := b.Get([]byte("foo")) fmt.Printf("The value of 'foo' was: %s\n", value) + return nil - }) + }); err != nil { + log.Fatal(err) + } // Delete the key in a different write transaction. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("widgets")).Delete([]byte("foo")) - }) + }); err != nil { + log.Fatal(err) + } // Retrieve the key again. - db.View(func(tx *bolt.Tx) error { + if err := db.View(func(tx *bolt.Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) if value == nil { fmt.Printf("The value of 'foo' is now: nil\n") } return nil - }) + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } // Output: // The value of 'foo' was: bar @@ -1142,25 +1779,46 @@ func ExampleBucket_Delete() { func ExampleBucket_ForEach() { // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } defer os.Remove(db.Path()) - defer db.Close() // Insert data into a bucket. 
- db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("animals")) - b := tx.Bucket([]byte("animals")) - b.Put([]byte("dog"), []byte("fun")) - b.Put([]byte("cat"), []byte("lame")) - b.Put([]byte("liger"), []byte("awesome")) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("animals")) + if err != nil { + return err + } + + if err := b.Put([]byte("dog"), []byte("fun")); err != nil { + return err + } + if err := b.Put([]byte("cat"), []byte("lame")); err != nil { + return err + } + if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { + return err + } // Iterate over items in sorted key order. - b.ForEach(func(k, v []byte) error { + if err := b.ForEach(func(k, v []byte) error { fmt.Printf("A %s is %s.\n", k, v) return nil - }) + }); err != nil { + return err + } + return nil - }) + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } // Output: // A cat is lame. diff --git a/cursor_test.go b/cursor_test.go index d748852..562d60f 100644 --- a/cursor_test.go +++ b/cursor_test.go @@ -4,7 +4,9 @@ import ( "bytes" "encoding/binary" "fmt" + "log" "os" + "reflect" "sort" "testing" "testing/quick" @@ -14,100 +16,149 @@ import ( // Ensure that a cursor can return a reference to the bucket that created it. func TestCursor_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) - c := b.Cursor() - equals(t, b, c.Bucket()) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if cb := b.Cursor().Bucket(); !reflect.DeepEqual(cb, b) { + t.Fatal("cursor bucket mismatch") + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a Tx cursor can seek to the appropriate keys. 
func TestCursor_Seek(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - ok(t, b.Put([]byte("foo"), []byte("0001"))) - ok(t, b.Put([]byte("bar"), []byte("0002"))) - ok(t, b.Put([]byte("baz"), []byte("0003"))) - _, err = b.CreateBucket([]byte("bkt")) - ok(t, err) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("0001")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte("0002")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("0003")); err != nil { + t.Fatal(err) + } + + if _, err := b.CreateBucket([]byte("bkt")); err != nil { + t.Fatal(err) + } return nil - }) - db.View(func(tx *bolt.Tx) error { + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() // Exact match should go to the key. - k, v := c.Seek([]byte("bar")) - equals(t, []byte("bar"), k) - equals(t, []byte("0002"), v) + if k, v := c.Seek([]byte("bar")); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0002")) { + t.Fatalf("unexpected value: %v", v) + } // Inexact match should go to the next key. - k, v = c.Seek([]byte("bas")) - equals(t, []byte("baz"), k) - equals(t, []byte("0003"), v) + if k, v := c.Seek([]byte("bas")); !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0003")) { + t.Fatalf("unexpected value: %v", v) + } // Low key should go to the first key. 
- k, v = c.Seek([]byte("")) - equals(t, []byte("bar"), k) - equals(t, []byte("0002"), v) + if k, v := c.Seek([]byte("")); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte("0002")) { + t.Fatalf("unexpected value: %v", v) + } // High key should return no key. - k, v = c.Seek([]byte("zzz")) - assert(t, k == nil, "") - assert(t, v == nil, "") + if k, v := c.Seek([]byte("zzz")); k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } // Buckets should return their key but no value. - k, v = c.Seek([]byte("bkt")) - equals(t, []byte("bkt"), k) - assert(t, v == nil, "") + if k, v := c.Seek([]byte("bkt")); !bytes.Equal(k, []byte("bkt")) { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } func TestCursor_Delete(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - var count = 1000 + const count = 1000 // Insert every other key between 0 and $count. 
- db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } for i := 0; i < count; i += 1 { k := make([]byte, 8) binary.BigEndian.PutUint64(k, uint64(i)) - b.Put(k, make([]byte, 100)) + if err := b.Put(k, make([]byte, 100)); err != nil { + t.Fatal(err) + } + } + if _, err := b.CreateBucket([]byte("sub")); err != nil { + t.Fatal(err) } - b.CreateBucket([]byte("sub")) return nil - }) + }); err != nil { + t.Fatal(err) + } - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() bound := make([]byte, 8) binary.BigEndian.PutUint64(bound, uint64(count/2)) for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { if err := c.Delete(); err != nil { - return err + t.Fatal(err) } } - c.Seek([]byte("sub")) - err := c.Delete() - equals(t, err, bolt.ErrIncompatibleValue) - return nil - }) - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - equals(t, b.Stats().KeyN, count/2+1) + c.Seek([]byte("sub")) + if err := c.Delete(); err != bolt.ErrIncompatibleValue { + t.Fatalf("unexpected error: %s", err) + } + return nil - }) + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { + stats := tx.Bucket([]byte("widgets")).Stats() + if stats.KeyN != count/2+1 { + t.Fatalf("unexpected KeyN: %d", stats.KeyN) + } + return nil + }); err != nil { + t.Fatal(err) + } } // Ensure that a Tx cursor can seek to the appropriate keys when there are a @@ -116,25 +167,33 @@ func TestCursor_Delete(t *testing.T) { // // Related: https://github.com/boltdb/bolt/pull/187 func TestCursor_Seek_Large(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() var count = 10000 // Insert every other key between 0 and $count. 
- db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucket([]byte("widgets")) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for i := 0; i < count; i += 100 { for j := i; j < i+100; j += 2 { k := make([]byte, 8) binary.BigEndian.PutUint64(k, uint64(j)) - b.Put(k, make([]byte, 100)) + if err := b.Put(k, make([]byte, 100)); err != nil { + t.Fatal(err) + } } } return nil - }) + }); err != nil { + t.Fatal(err) + } - db.View(func(tx *bolt.Tx) error { + if err := db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() for i := 0; i < count; i++ { seek := make([]byte, 8) @@ -145,171 +204,269 @@ func TestCursor_Seek_Large(t *testing.T) { // The last seek is beyond the end of the the range so // it should return nil. if i == count-1 { - assert(t, k == nil, "") + if k != nil { + t.Fatal("expected nil key") + } continue } // Otherwise we should seek to the exact key or the next key. num := binary.BigEndian.Uint64(k) if i%2 == 0 { - equals(t, uint64(i), num) + if num != uint64(i) { + t.Fatalf("unexpected num: %d", num) + } } else { - equals(t, uint64(i+1), num) + if num != uint64(i+1) { + t.Fatalf("unexpected num: %d", num) + } } } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a cursor can iterate over an empty bucket without error. 
func TestCursor_EmptyBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }) - db.View(func(tx *bolt.Tx) error { + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() k, v := c.First() - assert(t, k == nil, "") - assert(t, v == nil, "") + if k != nil { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a Tx cursor can reverse iterate over an empty bucket without error. func TestCursor_EmptyBucketReverse(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }) - db.View(func(tx *bolt.Tx) error { + }); err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { c := tx.Bucket([]byte("widgets")).Cursor() k, v := c.Last() - assert(t, k == nil, "") - assert(t, v == nil, "") + if k != nil { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a Tx cursor can iterate over a single root with a couple elements. 
func TestCursor_Iterate_Leaf(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte{}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte{0}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte{1}); err != nil { + t.Fatal(err) + } return nil - }) - tx, _ := db.Begin(false) + }); err != nil { + t.Fatal(err) + } + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + defer func() { _ = tx.Rollback() }() + c := tx.Bucket([]byte("widgets")).Cursor() k, v := c.First() - equals(t, string(k), "bar") - equals(t, v, []byte{1}) + if !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{1}) { + t.Fatalf("unexpected value: %v", v) + } k, v = c.Next() - equals(t, string(k), "baz") - equals(t, v, []byte{}) + if !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{}) { + t.Fatalf("unexpected value: %v", v) + } k, v = c.Next() - equals(t, string(k), "foo") - equals(t, v, []byte{0}) + if !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{0}) { + t.Fatalf("unexpected value: %v", v) + } k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") + if k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") + if k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { 
+ t.Fatalf("expected nil value: %v", v) + } - tx.Rollback() + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } } // Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. func TestCursor_LeafRootReverse(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0}) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1}) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte{}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte{0}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte{1}); err != nil { + t.Fatal(err) + } return nil - }) - tx, _ := db.Begin(false) + }); err != nil { + t.Fatal(err) + } + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.Last() - equals(t, string(k), "foo") - equals(t, v, []byte{0}) + if k, v := c.Last(); !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{0}) { + t.Fatalf("unexpected value: %v", v) + } - k, v = c.Prev() - equals(t, string(k), "baz") - equals(t, v, []byte{}) + if k, v := c.Prev(); !bytes.Equal(k, []byte("baz")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{}) { + t.Fatalf("unexpected value: %v", v) + } - k, v = c.Prev() - equals(t, string(k), "bar") - equals(t, v, []byte{1}) + if k, v := c.Prev(); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, []byte{1}) { + t.Fatalf("unexpected value: %v", v) + } - k, v = c.Prev() - assert(t, k == nil, "") - assert(t, v == nil, "") + if k, v := c.Prev(); k != 
nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } - k, v = c.Prev() - assert(t, k == nil, "") - assert(t, v == nil, "") + if k, v := c.Prev(); k != nil { + t.Fatalf("expected nil key: %v", k) + } else if v != nil { + t.Fatalf("expected nil value: %v", v) + } - tx.Rollback() + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } } // Ensure that a Tx cursor can restart from the beginning. func TestCursor_Restart(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{}) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{}) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("bar"), []byte{}); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte{}); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } - tx, _ := db.Begin(false) + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } c := tx.Bucket([]byte("widgets")).Cursor() - k, _ := c.First() - equals(t, string(k), "bar") + if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } + if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } - k, _ = c.Next() - equals(t, string(k), "foo") + if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { + t.Fatalf("unexpected key: %v", k) + } + if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { + t.Fatalf("unexpected key: %v", k) + } - k, _ = c.First() - equals(t, string(k), "bar") - - k, _ = c.Next() - equals(t, string(k), "foo") - - tx.Rollback() + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } } // Ensure that a cursor can skip over empty pages that have been deleted. 
func TestCursor_First_EmptyPages(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() // Create 1000 keys in the "widgets" bucket. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) @@ -322,10 +479,12 @@ func TestCursor_First_EmptyPages(t *testing.T) { } return nil - }) + }); err != nil { + t.Fatal(err) + } // Delete half the keys and then try to iterate. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) for i := 0; i < 600; i++ { if err := b.Delete(u64tob(uint64(i))); err != nil { @@ -343,38 +502,61 @@ func TestCursor_First_EmptyPages(t *testing.T) { } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a Tx can iterate over all elements in a bucket. func TestCursor_QuickCheck(t *testing.T) { f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() // Bulk insert all values. - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + if err := tx.Commit(); err != nil { + t.Fatal(err) } - ok(t, tx.Commit()) // Sort test data. sort.Sort(items) // Iterate over all items and check consistency. 
var index = 0 - tx, _ = db.Begin(false) + tx, err = db.Begin(false) + if err != nil { + t.Fatal(err) + } + c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { - equals(t, k, items[index].Key) - equals(t, v, items[index].Value) + if !bytes.Equal(k, items[index].Key) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, items[index].Value) { + t.Fatalf("unexpected value: %v", v) + } index++ } - equals(t, len(items), index) - tx.Rollback() + if len(items) != index { + t.Fatalf("unexpected item count: %v, expected %v", len(items), index) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } return true } @@ -386,32 +568,52 @@ func TestCursor_QuickCheck(t *testing.T) { // Ensure that a transaction can iterate over all elements in a bucket in reverse. func TestCursor_QuickCheck_Reverse(t *testing.T) { f := func(items testdata) bool { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() // Bulk insert all values. - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - ok(t, b.Put(item.Key, item.Value)) + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + for _, item := range items { + if err := b.Put(item.Key, item.Value); err != nil { + t.Fatal(err) + } + } + if err := tx.Commit(); err != nil { + t.Fatal(err) } - ok(t, tx.Commit()) // Sort test data. sort.Sort(revtestdata(items)) // Iterate over all items and check consistency. 
var index = 0 - tx, _ = db.Begin(false) + tx, err = db.Begin(false) + if err != nil { + t.Fatal(err) + } c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { - equals(t, k, items[index].Key) - equals(t, v, items[index].Value) + if !bytes.Equal(k, items[index].Key) { + t.Fatalf("unexpected key: %v", k) + } else if !bytes.Equal(v, items[index].Value) { + t.Fatalf("unexpected value: %v", v) + } index++ } - equals(t, len(items), index) - tx.Rollback() + if len(items) != index { + t.Fatalf("unexpected item count: %v, expected %v", len(items), index) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } return true } @@ -422,76 +624,114 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) { // Ensure that a Tx cursor can iterate over subbuckets. func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - _, err = b.CreateBucket([]byte("bar")) - ok(t, err) - _, err = b.CreateBucket([]byte("baz")) - ok(t, err) + if err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("bar")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("baz")); err != nil { + t.Fatal(err) + } return nil - }) - db.View(func(tx *bolt.Tx) error { + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { var names []string c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { names = append(names, string(k)) - assert(t, v == nil, "") + if v != nil { + t.Fatalf("unexpected value: %v", v) + } + } + if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) { + t.Fatalf("unexpected 
names: %+v", names) } - equals(t, names, []string{"bar", "baz", "foo"}) return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a Tx cursor can reverse iterate over subbuckets. func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - ok(t, err) - _, err = b.CreateBucket([]byte("foo")) - ok(t, err) - _, err = b.CreateBucket([]byte("bar")) - ok(t, err) - _, err = b.CreateBucket([]byte("baz")) - ok(t, err) + if err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("bar")); err != nil { + t.Fatal(err) + } + if _, err := b.CreateBucket([]byte("baz")); err != nil { + t.Fatal(err) + } return nil - }) - db.View(func(tx *bolt.Tx) error { + }); err != nil { + t.Fatal(err) + } + + if err := db.View(func(tx *bolt.Tx) error { var names []string c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.Last(); k != nil; k, v = c.Prev() { names = append(names, string(k)) - assert(t, v == nil, "") + if v != nil { + t.Fatalf("unexpected value: %v", v) + } + } + if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) { + t.Fatalf("unexpected names: %+v", names) } - equals(t, names, []string{"foo", "baz", "bar"}) return nil - }) + }); err != nil { + t.Fatal(err) + } } func ExampleCursor() { // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } defer os.Remove(db.Path()) - defer db.Close() // Start a read-write transaction. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { // Create a new bucket. 
- tx.CreateBucket([]byte("animals")) + b, err := tx.CreateBucket([]byte("animals")) + if err != nil { + return err + } // Insert data into a bucket. - b := tx.Bucket([]byte("animals")) - b.Put([]byte("dog"), []byte("fun")) - b.Put([]byte("cat"), []byte("lame")) - b.Put([]byte("liger"), []byte("awesome")) + if err := b.Put([]byte("dog"), []byte("fun")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("cat"), []byte("lame")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { + log.Fatal(err) + } // Create a cursor for iteration. c := b.Cursor() @@ -506,7 +746,13 @@ func ExampleCursor() { } return nil - }) + }); err != nil { + log.Fatal(err) + } + + if err := db.Close(); err != nil { + log.Fatal(err) + } // Output: // A cat is lame. @@ -516,20 +762,30 @@ func ExampleCursor() { func ExampleCursor_reverse() { // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } defer os.Remove(db.Path()) - defer db.Close() // Start a read-write transaction. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { // Create a new bucket. - tx.CreateBucket([]byte("animals")) + b, err := tx.CreateBucket([]byte("animals")) + if err != nil { + return err + } // Insert data into a bucket. - b := tx.Bucket([]byte("animals")) - b.Put([]byte("dog"), []byte("fun")) - b.Put([]byte("cat"), []byte("lame")) - b.Put([]byte("liger"), []byte("awesome")) + if err := b.Put([]byte("dog"), []byte("fun")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("cat"), []byte("lame")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { + log.Fatal(err) + } // Create a cursor for iteration. c := b.Cursor() @@ -545,7 +801,14 @@ func ExampleCursor_reverse() { } return nil - }) + }); err != nil { + log.Fatal(err) + } + + // Close the database to release the file lock. 
+ if err := db.Close(); err != nil { + log.Fatal(err) + } // Output: // A liger is awesome. diff --git a/db.go b/db.go index 44ebabb..f559ee5 100644 --- a/db.go +++ b/db.go @@ -1,8 +1,10 @@ package bolt import ( + "errors" "fmt" "hash/fnv" + "log" "os" "runtime" "runtime/debug" @@ -387,7 +389,9 @@ func (db *DB) close() error { // No need to unlock read-only file. if !db.readOnly { // Unlock the file. - _ = funlock(db.file) + if err := funlock(db.file); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } } // Close the file descriptor. @@ -598,6 +602,136 @@ func (db *DB) View(fn func(*Tx) error) error { return nil } +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. 
+ db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. 
+var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + // Sync executes fdatasync() against the database file handle. // // This is not necessary under normal operation, however, if you use NoSync diff --git a/db_test.go b/db_test.go index 197071b..c1346b8 100644 --- a/db_test.go +++ b/db_test.go @@ -1,17 +1,21 @@ package bolt_test import ( + "bytes" "encoding/binary" "errors" "flag" "fmt" + "hash/fnv" "io/ioutil" + "log" "os" "path/filepath" "regexp" "runtime" "sort" "strings" + "sync" "testing" "time" "unsafe" @@ -50,24 +54,34 @@ type meta struct { func TestOpen(t *testing.T) { path := tempfile() db, err := bolt.Open(path, 0666, nil) - assert(t, db != nil, "") - ok(t, err) - equals(t, db.Path(), path) - ok(t, db.Close()) + if err != nil { + t.Fatal(err) + } else if db == nil { + t.Fatal("expected db") + } + + if s := db.Path(); s != path { + t.Fatalf("unexpected path: %s", s) + } + + if err := db.Close(); err != nil { + t.Fatal(err) + } +} + +// Ensure that opening a database with a blank path returns an error. +func TestOpen_ErrPathRequired(t *testing.T) { + _, err := bolt.Open("", 0666, nil) + if err == nil { + t.Fatalf("expected error") + } } // Ensure that opening a database with a bad path returns an error. 
-func TestOpen_BadPath(t *testing.T) { - for _, path := range []string{ - "", - filepath.Join(tempfile(), "youre-not-my-real-parent"), - } { - t.Logf("path = %q", path) - db, err := bolt.Open(path, 0666, nil) - assert(t, err != nil, "err: %s", err) - equals(t, path, err.(*os.PathError).Path) - equals(t, "open", err.(*os.PathError).Op) - equals(t, (*bolt.DB)(nil), db) +func TestOpen_ErrNotExists(t *testing.T) { + _, err := bolt.Open(filepath.Join(tempfile(), "bad-path"), 0666, nil) + if err == nil { + t.Fatal("expected error") } } @@ -81,13 +95,20 @@ func TestOpen_ErrChecksum(t *testing.T) { path := tempfile() f, err := os.Create(path) - equals(t, nil, err) - f.WriteAt(buf, pageHeaderSize) - f.Close() + if err != nil { + t.Fatal(err) + } + if _, err := f.WriteAt(buf, pageHeaderSize); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } defer os.Remove(path) - _, err = bolt.Open(path, 0666, nil) - equals(t, bolt.ErrChecksum, err) + if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrChecksum { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that opening a file that is not a Bolt database returns ErrInvalid. 
@@ -95,13 +116,20 @@ func TestOpen_ErrInvalid(t *testing.T) { path := tempfile() f, err := os.Create(path) - equals(t, nil, err) - fmt.Fprintln(f, "this is not a bolt database") - f.Close() + if err != nil { + t.Fatal(err) + } + if _, err := fmt.Fprintln(f, "this is not a bolt database"); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } defer os.Remove(path) - _, err = bolt.Open(path, 0666, nil) - equals(t, bolt.ErrInvalid, err) + if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrInvalid { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that opening a file created with a different version of Bolt returns @@ -114,13 +142,20 @@ func TestOpen_ErrVersionMismatch(t *testing.T) { path := tempfile() f, err := os.Create(path) - equals(t, nil, err) - f.WriteAt(buf, pageHeaderSize) - f.Close() + if err != nil { + t.Fatal(err) + } + if _, err := f.WriteAt(buf, pageHeaderSize); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } defer os.Remove(path) - _, err = bolt.Open(path, 0666, nil) - equals(t, bolt.ErrVersionMismatch, err) + if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrVersionMismatch { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that opening an already open database file will timeout. @@ -133,17 +168,26 @@ func TestOpen_Timeout(t *testing.T) { // Open a data file. db0, err := bolt.Open(path, 0666, nil) - assert(t, db0 != nil, "") - ok(t, err) + if err != nil { + t.Fatal(err) + } else if db0 == nil { + t.Fatal("expected database") + } // Attempt to open the database again. 
start := time.Now() db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond}) - assert(t, db1 == nil, "") - equals(t, bolt.ErrTimeout, err) - assert(t, time.Since(start) > 100*time.Millisecond, "") + if err != bolt.ErrTimeout { + t.Fatalf("unexpected timeout: %s", err) + } else if db1 != nil { + t.Fatal("unexpected database") + } else if time.Since(start) <= 100*time.Millisecond { + t.Fatal("expected to wait at least timeout duration") + } - db0.Close() + if err := db0.Close(); err != nil { + t.Fatal(err) + } } // Ensure that opening an already open database file will wait until its closed. @@ -156,39 +200,52 @@ func TestOpen_Wait(t *testing.T) { // Open a data file. db0, err := bolt.Open(path, 0666, nil) - assert(t, db0 != nil, "") - ok(t, err) + if err != nil { + t.Fatal(err) + } // Close it in just a bit. - time.AfterFunc(100*time.Millisecond, func() { db0.Close() }) + time.AfterFunc(100*time.Millisecond, func() { _ = db0.Close() }) // Attempt to open the database again. start := time.Now() db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond}) - assert(t, db1 != nil, "") - ok(t, err) - assert(t, time.Since(start) > 100*time.Millisecond, "") + if err != nil { + t.Fatal(err) + } else if time.Since(start) <= 100*time.Millisecond { + t.Fatal("expected to wait at least timeout duration") + } + + if err := db1.Close(); err != nil { + t.Fatal(err) + } } // Ensure that opening a database does not increase its size. // https://github.com/boltdb/bolt/issues/291 func TestOpen_Size(t *testing.T) { // Open a data file. - db := NewTestDB() + db := MustOpenDB() path := db.Path() - defer db.Close() + defer db.MustClose() // Insert until we get above the minimum 4MB size. 
- ok(t, db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { b, _ := tx.CreateBucketIfNotExists([]byte("data")) for i := 0; i < 10000; i++ { - ok(t, b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000))) + if err := b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)); err != nil { + t.Fatal(err) + } } return nil - })) + }); err != nil { + t.Fatal(err) + } // Close database and grab the size. - db.DB.Close() + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } sz := fileSize(path) if sz == 0 { t.Fatalf("unexpected new file size: %d", sz) @@ -196,9 +253,20 @@ func TestOpen_Size(t *testing.T) { // Reopen database, update, and check size again. db0, err := bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) })) - ok(t, db0.Close()) + if err != nil { + t.Fatal(err) + } + if err := db0.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + if err := db0.Close(); err != nil { + t.Fatal(err) + } newSz := fileSize(path) if newSz == 0 { t.Fatalf("unexpected new file size: %d", newSz) @@ -218,25 +286,31 @@ func TestOpen_Size_Large(t *testing.T) { } // Open a data file. - db := NewTestDB() + db := MustOpenDB() path := db.Path() - defer db.Close() + defer db.MustClose() // Insert until we get above the minimum 4MB size. var index uint64 for i := 0; i < 10000; i++ { - ok(t, db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { b, _ := tx.CreateBucketIfNotExists([]byte("data")) for j := 0; j < 1000; j++ { - ok(t, b.Put(u64tob(index), make([]byte, 50))) + if err := b.Put(u64tob(index), make([]byte, 50)); err != nil { + t.Fatal(err) + } index++ } return nil - })) + }); err != nil { + t.Fatal(err) + } } // Close database and grab the size. 
- db.DB.Close() + if err := db.DB.Close(); err != nil { + t.Fatal(err) + } sz := fileSize(path) if sz == 0 { t.Fatalf("unexpected new file size: %d", sz) @@ -246,9 +320,18 @@ func TestOpen_Size_Large(t *testing.T) { // Reopen database, update, and check size again. db0, err := bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) })) - ok(t, db0.Close()) + if err != nil { + t.Fatal(err) + } + if err := db0.Update(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) + }); err != nil { + t.Fatal(err) + } + if err := db0.Close(); err != nil { + t.Fatal(err) + } + newSz := fileSize(path) if newSz == 0 { t.Fatalf("unexpected new file size: %d", newSz) @@ -265,14 +348,26 @@ func TestOpen_Check(t *testing.T) { path := tempfile() db, err := bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) - db.Close() + if err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } db, err = bolt.Open(path, 0666, nil) - ok(t, err) - ok(t, db.View(func(tx *bolt.Tx) error { return <-tx.Check() })) - db.Close() + if err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } } // Ensure that write errors to the meta file handler during initialization are returned. 
@@ -285,14 +380,22 @@ func TestOpen_FileTooSmall(t *testing.T) { path := tempfile() db, err := bolt.Open(path, 0666, nil) - ok(t, err) - db.Close() + if err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } // corrupt the database - ok(t, os.Truncate(path, int64(os.Getpagesize()))) + if err := os.Truncate(path, int64(os.Getpagesize())); err != nil { + t.Fatal(err) + } db, err = bolt.Open(path, 0666, nil) - equals(t, errors.New("file size too small"), err) + if err == nil || err.Error() != "file size too small" { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that a database can be opened in read-only mode by multiple processes @@ -309,50 +412,65 @@ func TestOpen_ReadOnly(t *testing.T) { // Open in read-write mode. db, err := bolt.Open(path, 0666, nil) - ok(t, db.Update(func(tx *bolt.Tx) error { + if err != nil { + t.Fatal(err) + } else if db.IsReadOnly() { + t.Fatal("db should not be in read only mode") + } + if err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket(bucket) if err != nil { return err } - return b.Put(key, value) - })) - assert(t, db != nil, "") - assert(t, !db.IsReadOnly(), "") - ok(t, err) - ok(t, db.Close()) + if err := b.Put(key, value); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + if err := db.Close(); err != nil { + t.Fatal(err) + } // Open in read-only mode. db0, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) - ok(t, err) - defer db0.Close() + if err != nil { + t.Fatal(err) + } // Opening in read-write mode should return an error. - _, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100}) - assert(t, err != nil, "") + if _, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100}); err == nil { + t.Fatal("expected error") + } // And again (in read-only mode). 
db1, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) - ok(t, err) - defer db1.Close() + if err != nil { + t.Fatal(err) + } // Verify both read-only databases are accessible. for _, db := range []*bolt.DB{db0, db1} { // Verify is is in read only mode indeed. - assert(t, db.IsReadOnly(), "") + if !db.IsReadOnly() { + t.Fatal("expected read only mode") + } // Read-only databases should not allow updates. - assert(t, - bolt.ErrDatabaseReadOnly == db.Update(func(*bolt.Tx) error { - panic(`should never get here`) - }), - "") + if err := db.Update(func(*bolt.Tx) error { + panic(`should never get here`) + }); err != bolt.ErrDatabaseReadOnly { + t.Fatalf("unexpected error: %s", err) + } // Read-only databases should not allow beginning writable txns. - _, err = db.Begin(true) - assert(t, bolt.ErrDatabaseReadOnly == err, "") + if _, err := db.Begin(true); err != bolt.ErrDatabaseReadOnly { + t.Fatalf("unexpected error: %s", err) + } // Verify the data. - ok(t, db.View(func(tx *bolt.Tx) error { + if err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket(bucket) if b == nil { return fmt.Errorf("expected bucket `%s`", string(bucket)) @@ -364,7 +482,16 @@ func TestOpen_ReadOnly(t *testing.T) { return fmt.Errorf("expected `%s`, got `%s`", expected, got) } return nil - })) + }); err != nil { + t.Fatal(err) + } + } + + if err := db0.Close(); err != nil { + t.Fatal(err) + } + if err := db1.Close(); err != nil { + t.Fatal(err) } } @@ -380,29 +507,40 @@ func TestDB_Open_InitialMmapSize(t *testing.T) { testWriteSize := 1 << 27 // 134MB db, err := bolt.Open(path, 0666, &bolt.Options{InitialMmapSize: initMmapSize}) - assert(t, err == nil, "") + if err != nil { + t.Fatal(err) + } // create a long-running read transaction // that never gets closed while writing rtx, err := db.Begin(false) - assert(t, err == nil, "") - defer rtx.Rollback() + if err != nil { + t.Fatal(err) + } // create a write transaction wtx, err := db.Begin(true) - assert(t, err == nil, "") + if err != nil { + 
t.Fatal(err) + } b, err := wtx.CreateBucket([]byte("test")) - assert(t, err == nil, "") + if err != nil { + t.Fatal(err) + } // and commit a large write err = b.Put([]byte("foo"), make([]byte, testWriteSize)) - assert(t, err == nil, "") + if err != nil { + t.Fatal(err) + } done := make(chan struct{}) go func() { - wtx.Commit() + if err := wtx.Commit(); err != nil { + t.Fatal(err) + } done <- struct{}{} }() @@ -411,36 +549,49 @@ func TestDB_Open_InitialMmapSize(t *testing.T) { t.Errorf("unexpected that the reader blocks writer") case <-done: } + + if err := rtx.Rollback(); err != nil { + t.Fatal(err) + } } -// TODO(benbjohnson): Test corruption at every byte of the first two pages. - // Ensure that a database cannot open a transaction when it's not open. -func TestDB_Begin_DatabaseNotOpen(t *testing.T) { +func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) { var db bolt.DB - tx, err := db.Begin(false) - assert(t, tx == nil, "") - equals(t, err, bolt.ErrDatabaseNotOpen) + if _, err := db.Begin(false); err != bolt.ErrDatabaseNotOpen { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that a read-write transaction can be retrieved. func TestDB_BeginRW(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) - assert(t, tx != nil, "") - ok(t, err) - assert(t, tx.DB() == db.DB, "") - equals(t, tx.Writable(), true) - ok(t, tx.Commit()) + if err != nil { + t.Fatal(err) + } else if tx == nil { + t.Fatal("expected tx") + } + + if tx.DB() != db.DB { + t.Fatal("unexpected tx database") + } else if !tx.Writable() { + t.Fatal("expected writable tx") + } + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } } // Ensure that opening a transaction while the DB is closed returns an error. 
func TestDB_BeginRW_Closed(t *testing.T) { var db bolt.DB - tx, err := db.Begin(true) - equals(t, err, bolt.ErrDatabaseNotOpen) - assert(t, tx == nil, "") + if _, err := db.Begin(true); err != bolt.ErrDatabaseNotOpen { + t.Fatalf("unexpected error: %s", err) + } } func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) } @@ -448,8 +599,8 @@ func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) // Ensure that a database cannot close while transactions are open. func testDB_Close_PendingTx(t *testing.T, writable bool) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() // Start transaction. tx, err := db.Begin(true) @@ -460,7 +611,9 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) { // Open update in separate goroutine. done := make(chan struct{}) go func() { - db.Close() + if err := db.Close(); err != nil { + t.Fatal(err) + } close(done) }() @@ -488,247 +641,343 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) { // Ensure a database can provide a transactional block. 
func TestDB_Update(t *testing.T) { - db := NewTestDB() - defer db.Close() - err := db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } + if err := b.Delete([]byte("foo")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + if err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("bar")) - b.Put([]byte("baz"), []byte("bat")) - b.Delete([]byte("foo")) + if v := b.Get([]byte("foo")); v != nil { + t.Fatalf("expected nil value, got: %v", v) + } + if v := b.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { + t.Fatalf("unexpected value: %v", v) + } return nil - }) - ok(t, err) - err = db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") - equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) - return nil - }) - ok(t, err) + }); err != nil { + t.Fatal(err) + } } // Ensure a closed database returns an error while running a transaction block func TestDB_Update_Closed(t *testing.T) { var db bolt.DB - err := db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } return nil - }) - equals(t, err, bolt.ErrDatabaseNotOpen) + }); err != bolt.ErrDatabaseNotOpen { + t.Fatalf("unexpected error: %s", err) + } } // Ensure a panic occurs while trying to commit a managed transaction. 
func TestDB_Update_ManualCommit(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - var ok bool - db.Update(func(tx *bolt.Tx) error { + var panicked bool + if err := db.Update(func(tx *bolt.Tx) error { func() { defer func() { if r := recover(); r != nil { - ok = true + panicked = true } }() - tx.Commit() + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } }() return nil - }) - assert(t, ok, "expected panic") + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } } // Ensure a panic occurs while trying to rollback a managed transaction. func TestDB_Update_ManualRollback(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - var ok bool - db.Update(func(tx *bolt.Tx) error { + var panicked bool + if err := db.Update(func(tx *bolt.Tx) error { func() { defer func() { if r := recover(); r != nil { - ok = true + panicked = true } }() - tx.Rollback() + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } }() return nil - }) - assert(t, ok, "expected panic") + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } } // Ensure a panic occurs while trying to commit a managed transaction. func TestDB_View_ManualCommit(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - var ok bool - db.Update(func(tx *bolt.Tx) error { + var panicked bool + if err := db.View(func(tx *bolt.Tx) error { func() { defer func() { if r := recover(); r != nil { - ok = true + panicked = true } }() - tx.Commit() + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } }() return nil - }) - assert(t, ok, "expected panic") + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } } // Ensure a panic occurs while trying to rollback a managed transaction. 
func TestDB_View_ManualRollback(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() - var ok bool - db.Update(func(tx *bolt.Tx) error { + var panicked bool + if err := db.View(func(tx *bolt.Tx) error { func() { defer func() { if r := recover(); r != nil { - ok = true + panicked = true } }() - tx.Rollback() + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } }() return nil - }) - assert(t, ok, "expected panic") + }); err != nil { + t.Fatal(err) + } else if !panicked { + t.Fatal("expected panic") + } } // Ensure a write transaction that panics does not hold open locks. func TestDB_Update_Panic(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() + // Panic during update but recover. func() { defer func() { if r := recover(); r != nil { t.Log("recover: update", r) } }() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } panic("omg") - }) + }); err != nil { + t.Fatal(err) + } }() // Verify we can update again. - err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - ok(t, err) + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } // Verify that our change persisted. - err = db.Update(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") + if err := db.Update(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure a database can return an error through a read-only transactional block. 
func TestDB_View_Error(t *testing.T) { - db := NewTestDB() - defer db.Close() - err := db.View(func(tx *bolt.Tx) error { + db := MustOpenDB() + defer db.MustClose() + + if err := db.View(func(tx *bolt.Tx) error { return errors.New("xxx") - }) - equals(t, errors.New("xxx"), err) + }); err == nil || err.Error() != "xxx" { + t.Fatalf("unexpected error: %s", err) + } } // Ensure a read transaction that panics does not hold open locks. func TestDB_View_Panic(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - return nil - }) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Panic during view transaction but recover. func() { defer func() { if r := recover(); r != nil { t.Log("recover: view", r) } }() - db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") + + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } panic("omg") - }) + }); err != nil { + t.Fatal(err) + } }() // Verify that we can still use read transactions. - db.View(func(tx *bolt.Tx) error { - assert(t, tx.Bucket([]byte("widgets")) != nil, "") + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } return nil - }) -} - -// Ensure that an error is returned when a database write fails. -func TestDB_Commit_WriteFail(t *testing.T) { - t.Skip("pending") // TODO(benbjohnson) + }); err != nil { + t.Fatal(err) + } } // Ensure that DB stats can be returned. 
func TestDB_Stats(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }) + }); err != nil { + t.Fatal(err) + } + stats := db.Stats() - equals(t, 2, stats.TxStats.PageCount) - equals(t, 0, stats.FreePageN) - equals(t, 2, stats.PendingPageN) + if stats.TxStats.PageCount != 2 { + t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.PageCount) + } else if stats.FreePageN != 0 { + t.Fatalf("unexpected FreePageN: %d", stats.FreePageN) + } else if stats.PendingPageN != 2 { + t.Fatalf("unexpected PendingPageN: %d", stats.PendingPageN) + } } // Ensure that database pages are in expected order and type. func TestDB_Consistency(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }) + }); err != nil { + t.Fatal(err) + } for i := 0; i < 10; i++ { - db.Update(func(tx *bolt.Tx) error { - ok(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))) + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } - db.Update(func(tx *bolt.Tx) error { - p, _ := tx.Page(0) - assert(t, p != nil, "") - equals(t, "meta", p.Type) - p, _ = tx.Page(1) - assert(t, p != nil, "") - equals(t, "meta", p.Type) + if err := db.Update(func(tx *bolt.Tx) error { + if p, _ := tx.Page(0); p == nil { + t.Fatal("expected page") + } else if p.Type != "meta" { + t.Fatalf("unexpected page type: %s", p.Type) + } - p, _ = tx.Page(2) - assert(t, p != nil, "") - equals(t, "free", p.Type) + if p, _ := tx.Page(1); p == nil { + t.Fatal("expected page") + } else if p.Type != "meta" { 
+ t.Fatalf("unexpected page type: %s", p.Type) + } - p, _ = tx.Page(3) - assert(t, p != nil, "") - equals(t, "free", p.Type) + if p, _ := tx.Page(2); p == nil { + t.Fatal("expected page") + } else if p.Type != "free" { + t.Fatalf("unexpected page type: %s", p.Type) + } - p, _ = tx.Page(4) - assert(t, p != nil, "") - equals(t, "leaf", p.Type) + if p, _ := tx.Page(3); p == nil { + t.Fatal("expected page") + } else if p.Type != "free" { + t.Fatalf("unexpected page type: %s", p.Type) + } - p, _ = tx.Page(5) - assert(t, p != nil, "") - equals(t, "freelist", p.Type) + if p, _ := tx.Page(4); p == nil { + t.Fatal("expected page") + } else if p.Type != "leaf" { + t.Fatalf("unexpected page type: %s", p.Type) + } - p, _ = tx.Page(6) - assert(t, p == nil, "") + if p, _ := tx.Page(5); p == nil { + t.Fatal("expected page") + } else if p.Type != "freelist" { + t.Fatalf("unexpected page type: %s", p.Type) + } + + if p, _ := tx.Page(6); p != nil { + t.Fatal("unexpected page") + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that DB stats can be subtracted from one another. @@ -739,19 +988,209 @@ func TestDBStats_Sub(t *testing.T) { b.TxStats.PageCount = 10 b.FreePageN = 14 diff := b.Sub(&a) - equals(t, 7, diff.TxStats.PageCount) + if diff.TxStats.PageCount != 7 { + t.Fatalf("unexpected TxStats.PageCount: %d", diff.TxStats.PageCount) + } + // free page stats are copied from the receiver and not subtracted - equals(t, 14, diff.FreePageN) + if diff.FreePageN != 14 { + t.Fatalf("unexpected FreePageN: %d", diff.FreePageN) + } +} + +// Ensure two functions can perform updates in a single batch. +func TestDB_Batch(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } + + // Iterate over multiple updates in separate goroutines. 
+ n := 2 + ch := make(chan error) + for i := 0; i < n; i++ { + go func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + }(i) + } + + // Check all responses to make sure there's no error. + for i := 0; i < n; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 0; i < n; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestDB_Batch_Panic(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + var sentinel int + var bork = &sentinel + var problem interface{} + var err error + + // Execute a function inside a batch that panics. + func() { + defer func() { + if p := recover(); p != nil { + problem = p + } + }() + err = db.Batch(func(tx *bolt.Tx) error { + panic(bork) + }) + }() + + // Verify there is no error. + if g, e := err, error(nil); g != e { + t.Fatalf("wrong error: %v != %v", g, e) + } + // Verify the panic was captured. + if g, e := problem, bork; g != e { + t.Fatalf("wrong error: %v != %v", g, e) + } +} + +func TestDB_BatchFull(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + const size = 3 + // buffered so we never leak goroutines + ch := make(chan error, size) + put := func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + } + + db.MaxBatchSize = size + // high enough to never trigger here + db.MaxBatchDelay = 1 * time.Hour + + go put(1) + go put(2) + + // Give the batch a chance to exhibit bugs. 
+ time.Sleep(10 * time.Millisecond) + + // not triggered yet + select { + case <-ch: + t.Fatalf("batch triggered too early") + default: + } + + go put(3) + + // Check all responses to make sure there's no error. + for i := 0; i < size; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i <= size; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } +} + +func TestDB_BatchTime(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("widgets")) + return err + }); err != nil { + t.Fatal(err) + } + + const size = 1 + // buffered so we never leak goroutines + ch := make(chan error, size) + put := func(i int) { + ch <- db.Batch(func(tx *bolt.Tx) error { + return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) + }) + } + + db.MaxBatchSize = 1000 + db.MaxBatchDelay = 0 + + go put(1) + + // Batch must trigger by time alone. + + // Check all responses to make sure there's no error. + for i := 0; i < size; i++ { + if err := <-ch; err != nil { + t.Fatal(err) + } + } + + // Ensure data is correct. + if err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("widgets")) + for i := 1; i <= size; i++ { + if v := b.Get(u64tob(uint64(i))); v == nil { + t.Errorf("key not found: %d", i) + } + } + return nil + }); err != nil { + t.Fatal(err) + } } func ExampleDB_Update() { // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } defer os.Remove(db.Path()) - defer db.Close() - // Execute several commands within a write transaction. - err := db.Update(func(tx *bolt.Tx) error { + // Execute several commands within a read-write transaction. 
+ if err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { return err @@ -760,15 +1199,22 @@ func ExampleDB_Update() { return err } return nil - }) + }); err != nil { + log.Fatal(err) + } - // If our transactional block didn't return an error then our data is saved. - if err == nil { - db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value of 'foo' is: %s\n", value) - return nil - }) + // Read the value back from a separate read-only transaction. + if err := db.View(func(tx *bolt.Tx) error { + value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) + fmt.Printf("The value of 'foo' is: %s\n", value) + return nil + }); err != nil { + log.Fatal(err) + } + + // Close database to release the file lock. + if err := db.Close(); err != nil { + log.Fatal(err) } // Output: @@ -777,25 +1223,42 @@ func ExampleDB_Update() { func ExampleDB_View() { // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } defer os.Remove(db.Path()) - defer db.Close() // Insert data into a bucket. - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("people")) - b := tx.Bucket([]byte("people")) - b.Put([]byte("john"), []byte("doe")) - b.Put([]byte("susy"), []byte("que")) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("people")) + if err != nil { + return err + } + if err := b.Put([]byte("john"), []byte("doe")); err != nil { + return err + } + if err := b.Put([]byte("susy"), []byte("que")); err != nil { + return err + } return nil - }) + }); err != nil { + log.Fatal(err) + } // Access data from within a read-only transactional block. 
- db.View(func(tx *bolt.Tx) error { + if err := db.View(func(tx *bolt.Tx) error { v := tx.Bucket([]byte("people")).Get([]byte("john")) fmt.Printf("John's last name is %s.\n", v) return nil - }) + }); err != nil { + log.Fatal(err) + } + + // Close database to release the file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } // Output: // John's last name is doe. @@ -803,31 +1266,56 @@ func ExampleDB_View() { func ExampleDB_Begin_ReadOnly() { // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } defer os.Remove(db.Path()) - defer db.Close() - // Create a bucket. - db.Update(func(tx *bolt.Tx) error { + // Create a bucket using a read-write transaction. + if err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }) + }); err != nil { + log.Fatal(err) + } // Create several keys in a transaction. - tx, _ := db.Begin(true) + tx, err := db.Begin(true) + if err != nil { + log.Fatal(err) + } b := tx.Bucket([]byte("widgets")) - b.Put([]byte("john"), []byte("blue")) - b.Put([]byte("abby"), []byte("red")) - b.Put([]byte("zephyr"), []byte("purple")) - tx.Commit() + if err := b.Put([]byte("john"), []byte("blue")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("abby"), []byte("red")); err != nil { + log.Fatal(err) + } + if err := b.Put([]byte("zephyr"), []byte("purple")); err != nil { + log.Fatal(err) + } + if err := tx.Commit(); err != nil { + log.Fatal(err) + } // Iterate over the values in sorted key order. 
- tx, _ = db.Begin(false) + tx, err = db.Begin(false) + if err != nil { + log.Fatal(err) + } c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { fmt.Printf("%s likes %s\n", k, v) } - tx.Rollback() + + if err := tx.Rollback(); err != nil { + log.Fatal(err) + } + + if err := db.Close(); err != nil { + log.Fatal(err) + } // Output: // abby likes red @@ -835,51 +1323,195 @@ func ExampleDB_Begin_ReadOnly() { // zephyr likes purple } -// TestDB represents a wrapper around a Bolt DB to handle temporary file -// creation and automatic cleanup on close. -type TestDB struct { +func BenchmarkDBBatchAutomatic(b *testing.B) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("bench")) + return err + }); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for round := 0; round < 1000; round++ { + wg.Add(1) + + go func(id uint32) { + defer wg.Done() + <-start + + h := fnv.New32a() + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, id) + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + insert := func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("bench")) + return b.Put(k, []byte("filler")) + } + if err := db.Batch(insert); err != nil { + b.Error(err) + return + } + }(uint32(round)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func BenchmarkDBBatchSingle(b *testing.B) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("bench")) + return err + }); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for round := 0; round < 1000; round++ { + wg.Add(1) + go func(id uint32) { + defer wg.Done() + <-start + + h := fnv.New32a() + buf := make([]byte, 4) + 
binary.LittleEndian.PutUint32(buf, id) + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + insert := func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("bench")) + return b.Put(k, []byte("filler")) + } + if err := db.Update(insert); err != nil { + b.Error(err) + return + } + }(uint32(round)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func BenchmarkDBBatchManual10x100(b *testing.B) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("bench")) + return err + }); err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + start := make(chan struct{}) + var wg sync.WaitGroup + + for major := 0; major < 10; major++ { + wg.Add(1) + go func(id uint32) { + defer wg.Done() + <-start + + insert100 := func(tx *bolt.Tx) error { + h := fnv.New32a() + buf := make([]byte, 4) + for minor := uint32(0); minor < 100; minor++ { + binary.LittleEndian.PutUint32(buf, uint32(id*100+minor)) + h.Reset() + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + b := tx.Bucket([]byte("bench")) + if err := b.Put(k, []byte("filler")); err != nil { + return err + } + } + return nil + } + if err := db.Update(insert100); err != nil { + b.Error(err) + } + }(uint32(major)) + } + close(start) + wg.Wait() + } + + b.StopTimer() + validateBatchBench(b, db) +} + +func validateBatchBench(b *testing.B, db *DB) { + var rollback = errors.New("sentinel error to cause rollback") + validate := func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte("bench")) + h := fnv.New32a() + buf := make([]byte, 4) + for id := uint32(0); id < 1000; id++ { + binary.LittleEndian.PutUint32(buf, id) + h.Reset() + _, _ = h.Write(buf[:]) + k := h.Sum(nil) + v := bucket.Get(k) + if v == nil { + b.Errorf("not found id=%d key=%x", id, k) + continue + } + if g, e := v, []byte("filler"); !bytes.Equal(g, e) { + b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e) + } + if err := bucket.Delete(k); err != nil 
{ + return err + } + } + // should be empty now + c := bucket.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + b.Errorf("unexpected key: %x = %q", k, v) + } + return rollback + } + if err := db.Update(validate); err != nil && err != rollback { + b.Error(err) + } +} + +// DB is a test wrapper for bolt.DB. +type DB struct { *bolt.DB } -// NewTestDB returns a new instance of TestDB. -func NewTestDB() *TestDB { +// MustOpenDB returns a new, open DB at a temporary location. +func MustOpenDB() *DB { db, err := bolt.Open(tempfile(), 0666, nil) if err != nil { - panic("cannot open db: " + err.Error()) - } - return &TestDB{db} -} - -// MustView executes a read-only function. Panic on error. -func (db *TestDB) MustView(fn func(tx *bolt.Tx) error) { - if err := db.DB.View(func(tx *bolt.Tx) error { - return fn(tx) - }); err != nil { - panic(err.Error()) - } -} - -// MustUpdate executes a read-write function. Panic on error. -func (db *TestDB) MustUpdate(fn func(tx *bolt.Tx) error) { - if err := db.DB.View(func(tx *bolt.Tx) error { - return fn(tx) - }); err != nil { - panic(err.Error()) - } -} - -// MustCreateBucket creates a new bucket. Panic on error. -func (db *TestDB) MustCreateBucket(name []byte) { - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte(name)) - return err - }); err != nil { - panic(err.Error()) + panic(err) } + return &DB{db} } // Close closes the database and deletes the underlying file. -func (db *TestDB) Close() { +func (db *DB) Close() error { // Log statistics. if *statsFlag { db.PrintStats() @@ -890,11 +1522,18 @@ func (db *TestDB) Close() { // Close database and remove file. defer os.Remove(db.Path()) - db.DB.Close() + return db.DB.Close() +} + +// MustClose closes the database and deletes the underlying file. Panic on error. 
+func (db *DB) MustClose() { + if err := db.Close(); err != nil { + panic(err) + } } // PrintStats prints the database stats -func (db *TestDB) PrintStats() { +func (db *DB) PrintStats() { var stats = db.Stats() fmt.Printf("[db] %-20s %-20s %-20s\n", fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc), @@ -909,8 +1548,8 @@ func (db *TestDB) PrintStats() { } // MustCheck runs a consistency check on the database and panics if any errors are found. -func (db *TestDB) MustCheck() { - db.Update(func(tx *bolt.Tx) error { +func (db *DB) MustCheck() { + if err := db.Update(func(tx *bolt.Tx) error { // Collect all the errors. var errors []error for err := range tx.Check() { @@ -923,7 +1562,9 @@ func (db *TestDB) MustCheck() { // If errors occurred, copy the DB and print the errors. if len(errors) > 0 { var path = tempfile() - tx.CopyFile(path, 0600) + if err := tx.CopyFile(path, 0600); err != nil { + panic(err) + } // Print errors. fmt.Print("\n\n") @@ -939,31 +1580,46 @@ func (db *TestDB) MustCheck() { } return nil - }) + }); err != nil && err != bolt.ErrDatabaseNotOpen { + panic(err) + } } // CopyTempFile copies a database to a temporary file. -func (db *TestDB) CopyTempFile() { +func (db *DB) CopyTempFile() { path := tempfile() - db.View(func(tx *bolt.Tx) error { return tx.CopyFile(path, 0600) }) + if err := db.View(func(tx *bolt.Tx) error { + return tx.CopyFile(path, 0600) + }); err != nil { + panic(err) + } fmt.Println("db copied to: ", path) } // tempfile returns a temporary file path. func tempfile() string { - f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) + f, err := ioutil.TempFile("", "bolt-") + if err != nil { + panic(err) + } + if err := f.Close(); err != nil { + panic(err) + } + if err := os.Remove(f.Name()); err != nil { + panic(err) + } return f.Name() } // mustContainKeys checks that a bucket contains a given set of keys. 
func mustContainKeys(b *bolt.Bucket, m map[string]string) { found := make(map[string]string) - b.ForEach(func(k, _ []byte) error { + if err := b.ForEach(func(k, _ []byte) error { found[string(k)] = "" return nil - }) + }); err != nil { + panic(err) + } // Check for keys found in bucket that shouldn't be there. var keys []string diff --git a/freelist_test.go b/freelist_test.go index 8caeab2..4e9b3a8 100644 --- a/freelist_test.go +++ b/freelist_test.go @@ -117,7 +117,9 @@ func TestFreelist_write(t *testing.T) { f.pending[100] = []pgid{28, 11} f.pending[101] = []pgid{3} p := (*page)(unsafe.Pointer(&buf[0])) - f.write(p) + if err := f.write(p); err != nil { + t.Fatal(err) + } // Read the page back out. f2 := newFreelist() diff --git a/simulation_test.go b/simulation_test.go index ceb8bae..c691527 100644 --- a/simulation_test.go +++ b/simulation_test.go @@ -42,8 +42,8 @@ func testSimulate(t *testing.T, threadCount, parallelism int) { var versions = make(map[int]*QuickDB) versions[1] = NewQuickDB() - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() var mutex sync.Mutex @@ -89,10 +89,12 @@ func testSimulate(t *testing.T, threadCount, parallelism int) { versions[tx.ID()] = qdb mutex.Unlock() - ok(t, tx.Commit()) + if err := tx.Commit(); err != nil { + t.Fatal(err) + } }() } else { - defer tx.Rollback() + defer func() { _ = tx.Rollback() }() } // Ignore operation if we don't have data yet. diff --git a/tx.go b/tx.go index d16f9f5..5f4c9f0 100644 --- a/tx.go +++ b/tx.go @@ -285,7 +285,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { if err != nil { return 0, err } - defer f.Close() + defer func() { _ = f.Close() }() // Copy the meta pages. 
tx.db.metalock.Lock() diff --git a/tx_test.go b/tx_test.go index 6c8271a..7a274e4 100644 --- a/tx_test.go +++ b/tx_test.go @@ -1,8 +1,10 @@ package bolt_test import ( + "bytes" "errors" "fmt" + "log" "os" "testing" @@ -10,331 +12,519 @@ import ( ) // Ensure that committing a closed transaction returns an error. -func TestTx_Commit_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.CreateBucket([]byte("foo")) - ok(t, tx.Commit()) - equals(t, tx.Commit(), bolt.ErrTxClosed) +func TestTx_Commit_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + if _, err := tx.CreateBucket([]byte("foo")); err != nil { + t.Fatal(err) + } + + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + if err := tx.Commit(); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that rolling back a closed transaction returns an error. -func TestTx_Rollback_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - ok(t, tx.Rollback()) - equals(t, tx.Rollback(), bolt.ErrTxClosed) +func TestTx_Rollback_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + + if err := tx.Rollback(); err != nil { + t.Fatal(err) + } + if err := tx.Rollback(); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that committing a read-only transaction returns an error. 
-func TestTx_Commit_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(false) - equals(t, tx.Commit(), bolt.ErrTxNotWritable) +func TestTx_Commit_ErrTxNotWritable(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(false) + if err != nil { + t.Fatal(err) + } + if err := tx.Commit(); err != bolt.ErrTxNotWritable { + t.Fatal(err) + } } // Ensure that a transaction can retrieve a cursor on the root bucket. func TestTx_Cursor(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.CreateBucket([]byte("woojits")) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + + if _, err := tx.CreateBucket([]byte("woojits")); err != nil { + t.Fatal(err) + } + c := tx.Cursor() + if k, v := c.First(); !bytes.Equal(k, []byte("widgets")) { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } - k, v := c.First() - equals(t, "widgets", string(k)) - assert(t, v == nil, "") + if k, v := c.Next(); !bytes.Equal(k, []byte("woojits")) { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", v) + } - k, v = c.Next() - equals(t, "woojits", string(k)) - assert(t, v == nil, "") - - k, v = c.Next() - assert(t, k == nil, "") - assert(t, v == nil, "") + if k, v := c.Next(); k != nil { + t.Fatalf("unexpected key: %v", k) + } else if v != nil { + t.Fatalf("unexpected value: %v", k) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that creating a bucket with a read-only transaction returns an error. 
-func TestTx_CreateBucket_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.View(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("foo")) - assert(t, b == nil, "") - equals(t, bolt.ErrTxNotWritable, err) +func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.View(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket([]byte("foo")) + if err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that creating a bucket on a closed transaction returns an error. -func TestTx_CreateBucket_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.Commit() - b, err := tx.CreateBucket([]byte("foo")) - assert(t, b == nil, "") - equals(t, bolt.ErrTxClosed, err) +func TestTx_CreateBucket_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + + if _, err := tx.CreateBucket([]byte("foo")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that a Tx can retrieve a bucket. func TestTx_Bucket(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a Tx retrieving a non-existent key returns nil. 
-func TestTx_Get_Missing(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key")) - assert(t, value == nil, "") +func TestTx_Get_NotFound(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if b.Get([]byte("no_such_key")) != nil { + t.Fatal("expected nil value") + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket can be created and retrieved. func TestTx_CreateBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() // Create a bucket. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) + if err != nil { + t.Fatal(err) + } else if b == nil { + t.Fatal("expected bucket") + } return nil - }) + }); err != nil { + t.Fatal(err) + } // Read the bucket through a separate transaction. - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket can be created if it doesn't already exist. func TestTx_CreateBucketIfNotExists(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + // Create bucket. 
+ if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { + t.Fatal(err) + } else if b == nil { + t.Fatal("expected bucket") + } - b, err = tx.CreateBucketIfNotExists([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) + // Create bucket again. + if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { + t.Fatal(err) + } else if b == nil { + t.Fatal("expected bucket") + } - b, err = tx.CreateBucketIfNotExists([]byte{}) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) - - b, err = tx.CreateBucketIfNotExists(nil) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) return nil - }) + }); err != nil { + t.Fatal(err) + } // Read the bucket through a separate transaction. - db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - assert(t, b != nil, "") + if err := db.View(func(tx *bolt.Tx) error { + if tx.Bucket([]byte("widgets")) == nil { + t.Fatal("expected bucket") + } return nil - }) + }); err != nil { + t.Fatal(err) + } +} + +// Ensure transaction returns an error if creating an unnamed bucket. +func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists([]byte{}); err != bolt.ErrBucketNameRequired { + t.Fatalf("unexpected error: %s", err) + } + + if _, err := tx.CreateBucketIfNotExists(nil); err != bolt.ErrBucketNameRequired { + t.Fatalf("unexpected error: %s", err) + } + + return nil + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket cannot be created twice. -func TestTx_CreateBucket_Exists(t *testing.T) { - db := NewTestDB() - defer db.Close() +func TestTx_CreateBucket_ErrBucketExists(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + // Create a bucket. 
- db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } // Create the same bucket again. - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketExists, err) + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket([]byte("widgets")); err != bolt.ErrBucketExists { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket is created with a non-blank name. -func TestTx_CreateBucket_NameRequired(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket(nil) - assert(t, b == nil, "") - equals(t, bolt.ErrBucketNameRequired, err) +func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucket(nil); err != bolt.ErrBucketNameRequired { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that a bucket can be deleted. func TestTx_DeleteBucket(t *testing.T) { - db := NewTestDB() - defer db.Close() + db := MustOpenDB() + defer db.MustClose() // Create a bucket and add a value. 
- db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } // Delete the bucket and make sure we can't get the value. - db.Update(func(tx *bolt.Tx) error { - ok(t, tx.DeleteBucket([]byte("widgets"))) - assert(t, tx.Bucket([]byte("widgets")) == nil, "") + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + if tx.Bucket([]byte("widgets")) != nil { + t.Fatal("unexpected bucket") + } return nil - }) + }); err != nil { + t.Fatal(err) + } - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { // Create the bucket again and make sure there's not a phantom value. b, err := tx.CreateBucket([]byte("widgets")) - assert(t, b != nil, "") - ok(t, err) - assert(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")) == nil, "") + if err != nil { + t.Fatal(err) + } + if v := b.Get([]byte("foo")); v != nil { + t.Fatalf("unexpected phantom value: %v", v) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that deleting a bucket on a closed transaction returns an error. 
-func TestTx_DeleteBucket_Closed(t *testing.T) { - db := NewTestDB() - defer db.Close() - tx, _ := db.Begin(true) - tx.Commit() - equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxClosed) +func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + tx, err := db.Begin(true) + if err != nil { + t.Fatal(err) + } + if err := tx.Commit(); err != nil { + t.Fatal(err) + } + if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxClosed { + t.Fatalf("unexpected error: %s", err) + } } // Ensure that deleting a bucket with a read-only transaction returns an error. func TestTx_DeleteBucket_ReadOnly(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.View(func(tx *bolt.Tx) error { - equals(t, tx.DeleteBucket([]byte("foo")), bolt.ErrTxNotWritable) + db := MustOpenDB() + defer db.MustClose() + if err := db.View(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxNotWritable { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that nothing happens when deleting a bucket that doesn't exist. func TestTx_DeleteBucket_NotFound(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - equals(t, bolt.ErrBucketNotFound, tx.DeleteBucket([]byte("widgets"))) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte("widgets")); err != bolt.ErrBucketNotFound { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that no error is returned when a tx.ForEach function does not return // an error. 
func TestTx_ForEach_NoError(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } - equals(t, nil, tx.ForEach(func(name []byte, b *bolt.Bucket) error { + if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { return nil - })) + }); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that an error is returned when a tx.ForEach function returns an error. func TestTx_ForEach_WithError(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } - err := errors.New("foo") - equals(t, err, tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return err - })) + marker := errors.New("marker") + if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return marker + }); err != marker { + t.Fatalf("unexpected error: %s", err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } } // Ensure that Tx commit handlers are called after a transaction successfully commits. 
func TestTx_OnCommit(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + var x int - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { tx.OnCommit(func() { x += 1 }) tx.OnCommit(func() { x += 2 }) - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - equals(t, 3, x) + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } + return nil + }); err != nil { + t.Fatal(err) + } else if x != 3 { + t.Fatalf("unexpected x: %d", x) + } } // Ensure that Tx commit handlers are NOT called after a transaction rolls back. func TestTx_OnCommit_Rollback(t *testing.T) { + db := MustOpenDB() + defer db.MustClose() + var x int - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { tx.OnCommit(func() { x += 1 }) tx.OnCommit(func() { x += 2 }) - tx.CreateBucket([]byte("widgets")) + if _, err := tx.CreateBucket([]byte("widgets")); err != nil { + t.Fatal(err) + } return errors.New("rollback this commit") - }) - equals(t, 0, x) + }); err == nil || err.Error() != "rollback this commit" { + t.Fatalf("unexpected error: %s", err) + } else if x != 0 { + t.Fatalf("unexpected x: %d", x) + } } // Ensure that the database can be copied to a file path. 
func TestTx_CopyFile(t *testing.T) { - db := NewTestDB() - defer db.Close() - var dest = tempfile() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) + db := MustOpenDB() + defer db.MustClose() + + path := tempfile() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } - ok(t, db.View(func(tx *bolt.Tx) error { return tx.CopyFile(dest, 0600) })) + if err := db.View(func(tx *bolt.Tx) error { + return tx.CopyFile(path, 0600) + }); err != nil { + t.Fatal(err) + } - db2, err := bolt.Open(dest, 0600, nil) - ok(t, err) - defer db2.Close() + db2, err := bolt.Open(path, 0600, nil) + if err != nil { + t.Fatal(err) + } - db2.View(func(tx *bolt.Tx) error { - equals(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo"))) - equals(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz"))) + if err := db2.View(func(tx *bolt.Tx) error { + if v := tx.Bucket([]byte("widgets")).Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { + t.Fatalf("unexpected value: %v", v) + } + if v := tx.Bucket([]byte("widgets")).Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { + t.Fatalf("unexpected value: %v", v) + } return nil - }) + }); err != nil { + t.Fatal(err) + } + + if err := db2.Close(); err != nil { + t.Fatal(err) + } } type failWriterError struct{} @@ -360,63 +550,107 @@ func (f *failWriter) Write(p []byte) (n int, err error) { // Ensure that Copy handles write errors right. 
func TestTx_CopyFile_Error_Meta(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } - err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{}) }) - equals(t, err.Error(), "meta copy: error injected for tests") + if err := db.View(func(tx *bolt.Tx) error { + return tx.Copy(&failWriter{}) + }); err == nil || err.Error() != "meta copy: error injected for tests" { + t.Fatal("unexpected error: %s", err) + } } // Ensure that Copy handles write errors right. 
func TestTx_CopyFile_Error_Normal(t *testing.T) { - db := NewTestDB() - defer db.Close() - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")) + db := MustOpenDB() + defer db.MustClose() + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + t.Fatal(err) + } + if err := b.Put([]byte("baz"), []byte("bat")); err != nil { + t.Fatal(err) + } return nil - }) + }); err != nil { + t.Fatal(err) + } - err := db.View(func(tx *bolt.Tx) error { return tx.Copy(&failWriter{3 * db.Info().PageSize}) }) - equals(t, err.Error(), "error injected for tests") + if err := db.View(func(tx *bolt.Tx) error { + return tx.Copy(&failWriter{3 * db.Info().PageSize}) + }); err == nil || err.Error() != "error injected for tests" { + t.Fatal("unexpected error: %s", err) + } } func ExampleTx_Rollback() { // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } defer os.Remove(db.Path()) - defer db.Close() // Create a bucket. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err - }) + }); err != nil { + log.Fatal(err) + } // Set a value for a key. - db.Update(func(tx *bolt.Tx) error { + if err := db.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - }) + }); err != nil { + log.Fatal(err) + } // Update the key but rollback the transaction so it never saves. 
- tx, _ := db.Begin(true) + tx, err := db.Begin(true) + if err != nil { + log.Fatal(err) + } b := tx.Bucket([]byte("widgets")) - b.Put([]byte("foo"), []byte("baz")) - tx.Rollback() + if err := b.Put([]byte("foo"), []byte("baz")); err != nil { + log.Fatal(err) + } + if err := tx.Rollback(); err != nil { + log.Fatal(err) + } // Ensure that our original value is still set. - db.View(func(tx *bolt.Tx) error { + if err := db.View(func(tx *bolt.Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) fmt.Printf("The value for 'foo' is still: %s\n", value) return nil - }) + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } // Output: // The value for 'foo' is still: bar @@ -424,32 +658,58 @@ func ExampleTx_Rollback() { func ExampleTx_CopyFile() { // Open the database. - db, _ := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0666, nil) + if err != nil { + log.Fatal(err) + } defer os.Remove(db.Path()) - defer db.Close() // Create a bucket and a key. - db.Update(func(tx *bolt.Tx) error { - tx.CreateBucket([]byte("widgets")) - tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("widgets")) + if err != nil { + return err + } + if err := b.Put([]byte("foo"), []byte("bar")); err != nil { + return err + } return nil - }) + }); err != nil { + log.Fatal(err) + } // Copy the database to another file. toFile := tempfile() - db.View(func(tx *bolt.Tx) error { return tx.CopyFile(toFile, 0666) }) + if err := db.View(func(tx *bolt.Tx) error { + return tx.CopyFile(toFile, 0666) + }); err != nil { + log.Fatal(err) + } defer os.Remove(toFile) // Open the cloned database. - db2, _ := bolt.Open(toFile, 0666, nil) - defer db2.Close() + db2, err := bolt.Open(toFile, 0666, nil) + if err != nil { + log.Fatal(err) + } // Ensure that the key exists in the copy. 
- db2.View(func(tx *bolt.Tx) error { + if err := db2.View(func(tx *bolt.Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) fmt.Printf("The value for 'foo' in the clone is: %s\n", value) return nil - }) + }); err != nil { + log.Fatal(err) + } + + // Close database to release file lock. + if err := db.Close(); err != nil { + log.Fatal(err) + } + + if err := db2.Close(); err != nil { + log.Fatal(err) + } // Output: // The value for 'foo' in the clone is: bar