diff --git a/bucket_test.go b/bucket_test.go
index 029ff2b..6e5aed7 100644
--- a/bucket_test.go
+++ b/bucket_test.go
@@ -17,93 +17,93 @@ import (
 
 // Ensure that a bucket that gets a non-existent key returns nil.
 func TestBucket_Get_NonExistent(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-			assert.Nil(t, value)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+		assert.Nil(t, value)
+		return nil
 	})
 }
 
 // Ensure that a bucket can read a value that is not flushed yet.
 func TestBucket_Get_FromNode(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			b := tx.Bucket([]byte("widgets"))
-			b.Put([]byte("foo"), []byte("bar"))
-			value := b.Get([]byte("foo"))
-			assert.Equal(t, value, []byte("bar"))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		b.Put([]byte("foo"), []byte("bar"))
+		value := b.Get([]byte("foo"))
+		assert.Equal(t, value, []byte("bar"))
+		return nil
 	})
 }
 
 // Ensure that a bucket retrieved via Get() returns a nil.
 func TestBucket_Get_IncompatibleValue(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-			assert.NoError(t, err)
-			assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
+		assert.NoError(t, err)
+		assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+		return nil
 	})
 }
 
 // Ensure that a bucket can write a key/value.
 func TestBucket_Put(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-			assert.NoError(t, err)
-			value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-			assert.Equal(t, value, []byte("bar"))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+		assert.NoError(t, err)
+		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+		assert.Equal(t, value, []byte("bar"))
+		return nil
 	})
 }
 
 // Ensure that a bucket can rewrite a key in the same transaction.
 func TestBucket_Put_Repeat(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			b := tx.Bucket([]byte("widgets"))
-			assert.NoError(t, b.Put([]byte("foo"), []byte("bar")))
-			assert.NoError(t, b.Put([]byte("foo"), []byte("baz")))
-			value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-			assert.Equal(t, value, []byte("baz"))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		assert.NoError(t, b.Put([]byte("foo"), []byte("bar")))
+		assert.NoError(t, b.Put([]byte("foo"), []byte("baz")))
+		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+		assert.Equal(t, value, []byte("baz"))
+		return nil
 	})
 }
 
 // Ensure that a bucket can write a bunch of large values.
 func TestBucket_Put_Large(t *testing.T) {
-	var count = 100
-	var factor = 200
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			b := tx.Bucket([]byte("widgets"))
-			for i := 1; i < count; i++ {
-				assert.NoError(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))))
-			}
-			return nil
-		})
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			for i := 1; i < count; i++ {
-				value := b.Get([]byte(strings.Repeat("0", i*factor)))
-				assert.Equal(t, []byte(strings.Repeat("X", (count-i)*factor)), value)
-			}
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+
+	count, factor := 100, 200
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		for i := 1; i < count; i++ {
+			assert.NoError(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))))
+		}
+		return nil
+	})
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		for i := 1; i < count; i++ {
+			value := b.Get([]byte(strings.Repeat("0", i*factor)))
+			assert.Equal(t, []byte(strings.Repeat("X", (count-i)*factor)), value)
+		}
+		return nil
 	})
 }
 
@@ -116,102 +116,103 @@ func TestDB_Put_VeryLarge(t *testing.T) {
 	n, batchN := 400000, 200000
 	ksize, vsize := 8, 500
 
-	withOpenDB(func(db *DB, path string) {
-		for i := 0; i < n; i += batchN {
-			err := db.Update(func(tx *Tx) error {
-				b, _ := tx.CreateBucketIfNotExists([]byte("widgets"))
-				for j := 0; j < batchN; j++ {
-					k, v := make([]byte, ksize), make([]byte, vsize)
-					binary.BigEndian.PutUint32(k, uint32(i+j))
-					assert.NoError(t, b.Put(k, v))
-				}
-				return nil
-			})
-			assert.NoError(t, err)
-		}
-	})
+	db := NewTestDB()
+	defer db.Close()
+
+	for i := 0; i < n; i += batchN {
+		err := db.Update(func(tx *Tx) error {
+			b, _ := tx.CreateBucketIfNotExists([]byte("widgets"))
+			for j := 0; j < batchN; j++ {
+				k, v := make([]byte, ksize), make([]byte, vsize)
+				binary.BigEndian.PutUint32(k, uint32(i+j))
+				assert.NoError(t, b.Put(k, v))
+			}
+			return nil
+		})
+		assert.NoError(t, err)
+	}
 }
 
 // Ensure that a setting a value on a key with a bucket value returns an error.
 func TestBucket_Put_IncompatibleValue(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-			assert.NoError(t, err)
-			assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
+		assert.NoError(t, err)
+		assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+		return nil
 	})
 }
 
 // Ensure that a setting a value while the transaction is closed returns an error.
 func TestBucket_Put_Closed(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		tx, _ := db.Begin(true)
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		tx.Rollback()
-		assert.Equal(t, ErrTxClosed, b.Put([]byte("foo"), []byte("bar")))
-	})
+	db := NewTestDB()
+	defer db.Close()
+	tx, _ := db.Begin(true)
+	tx.CreateBucket([]byte("widgets"))
+	b := tx.Bucket([]byte("widgets"))
+	tx.Rollback()
+	assert.Equal(t, ErrTxClosed, b.Put([]byte("foo"), []byte("bar")))
 }
 
 // Ensure that setting a value on a read-only bucket returns an error.
 func TestBucket_Put_ReadOnly(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			assert.NoError(t, err)
-			return nil
-		})
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			err := b.Put([]byte("foo"), []byte("bar"))
-			assert.Equal(t, err, ErrTxNotWritable)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		assert.NoError(t, err)
+		return nil
+	})
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		err := b.Put([]byte("foo"), []byte("bar"))
+		assert.Equal(t, err, ErrTxNotWritable)
+		return nil
 	})
 }
 
 // Ensure that a bucket can delete an existing key.
 func TestBucket_Delete(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-			err := tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
-			assert.NoError(t, err)
-			value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
-			assert.Nil(t, value)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+		err := tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
+		assert.NoError(t, err)
+		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+		assert.Nil(t, value)
+		return nil
 	})
 }
 
 // Ensure that deleting a large set of keys will work correctly.
 func TestBucket_Delete_Large(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			var b, _ = tx.CreateBucket([]byte("widgets"))
-			for i := 0; i < 100; i++ {
-				assert.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))))
-			}
-			return nil
-		})
-		db.Update(func(tx *Tx) error {
-			var b = tx.Bucket([]byte("widgets"))
-			for i := 0; i < 100; i++ {
-				assert.NoError(t, b.Delete([]byte(strconv.Itoa(i))))
-			}
-			return nil
-		})
-		db.View(func(tx *Tx) error {
-			var b = tx.Bucket([]byte("widgets"))
-			for i := 0; i < 100; i++ {
-				assert.Nil(t, b.Get([]byte(strconv.Itoa(i))))
-			}
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		var b, _ = tx.CreateBucket([]byte("widgets"))
+		for i := 0; i < 100; i++ {
+			assert.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))))
+		}
+		return nil
+	})
+	db.Update(func(tx *Tx) error {
+		var b = tx.Bucket([]byte("widgets"))
+		for i := 0; i < 100; i++ {
+			assert.NoError(t, b.Delete([]byte(strconv.Itoa(i))))
+		}
+		return nil
+	})
+	db.View(func(tx *Tx) error {
+		var b = tx.Bucket([]byte("widgets"))
+		for i := 0; i < 100; i++ {
+			assert.Nil(t, b.Get([]byte(strconv.Itoa(i))))
+		}
+		return nil
 	})
 }
 
@@ -221,446 +222,447 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
 		t.Skip("skipping test in short mode.")
 	}
 
-	withOpenDB(func(db *DB, path string) {
-		k := make([]byte, 16)
-		for i := uint64(0); i < 10000; i++ {
-			err := db.Update(func(tx *Tx) error {
-				b, err := tx.CreateBucketIfNotExists([]byte("0"))
-				if err != nil {
-					t.Fatalf("bucket error: %s", err)
-				}
-
-				for j := uint64(0); j < 1000; j++ {
-					binary.BigEndian.PutUint64(k[:8], i)
-					binary.BigEndian.PutUint64(k[8:], j)
-					if err := b.Put(k, nil); err != nil {
-						t.Fatalf("put error: %s", err)
-					}
-				}
-
-				return nil
-			})
-
-			if err != nil {
-				t.Fatalf("update error: %s", err)
-			}
-		}
-
-		// Delete all of them in one large transaction
+	db := NewTestDB()
+	defer db.Close()
+	k := make([]byte, 16)
+	for i := uint64(0); i < 10000; i++ {
 		err := db.Update(func(tx *Tx) error {
-			b := tx.Bucket([]byte("0"))
-			c := b.Cursor()
-			for k, _ := c.First(); k != nil; k, _ = c.Next() {
-				b.Delete(k)
+			b, err := tx.CreateBucketIfNotExists([]byte("0"))
+			if err != nil {
+				t.Fatalf("bucket error: %s", err)
 			}
+
+			for j := uint64(0); j < 1000; j++ {
+				binary.BigEndian.PutUint64(k[:8], i)
+				binary.BigEndian.PutUint64(k[8:], j)
+				if err := b.Put(k, nil); err != nil {
+					t.Fatalf("put error: %s", err)
+				}
+			}
+
 			return nil
 		})
 
-		// Check that a freelist overflow occurred.
-		assert.NoError(t, err)
+		if err != nil {
+			t.Fatalf("update error: %s", err)
+		}
+	}
+
+	// Delete all of them in one large transaction
+	err := db.Update(func(tx *Tx) error {
+		b := tx.Bucket([]byte("0"))
+		c := b.Cursor()
+		for k, _ := c.First(); k != nil; k, _ = c.Next() {
+			b.Delete(k)
+		}
+		return nil
 	})
+
+	// Check that a freelist overflow occurred.
+	assert.NoError(t, err)
 }
 
 // Ensure that accessing and updating nested buckets is ok across transactions.
 func TestBucket_Nested(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			// Create a widgets bucket.
-			b, err := tx.CreateBucket([]byte("widgets"))
-			assert.NoError(t, err)
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		// Create a widgets bucket.
+		b, err := tx.CreateBucket([]byte("widgets"))
+		assert.NoError(t, err)
 
-			// Create a widgets/foo bucket.
-			_, err = b.CreateBucket([]byte("foo"))
-			assert.NoError(t, err)
+		// Create a widgets/foo bucket.
+		_, err = b.CreateBucket([]byte("foo"))
+		assert.NoError(t, err)
 
-			// Create a widgets/bar key.
-			assert.NoError(t, b.Put([]byte("bar"), []byte("0000")))
+		// Create a widgets/bar key.
+		assert.NoError(t, b.Put([]byte("bar"), []byte("0000")))
 
-			return nil
-		})
-		mustCheck(db)
+		return nil
+	})
+	db.MustCheck()
 
-		// Update widgets/bar.
-		db.Update(func(tx *Tx) error {
-			var b = tx.Bucket([]byte("widgets"))
-			assert.NoError(t, b.Put([]byte("bar"), []byte("xxxx")))
-			return nil
-		})
-		mustCheck(db)
+	// Update widgets/bar.
+	db.Update(func(tx *Tx) error {
+		var b = tx.Bucket([]byte("widgets"))
+		assert.NoError(t, b.Put([]byte("bar"), []byte("xxxx")))
+		return nil
+	})
+	db.MustCheck()
 
-		// Cause a split.
-		db.Update(func(tx *Tx) error {
-			var b = tx.Bucket([]byte("widgets"))
-			for i := 0; i < 10000; i++ {
-				assert.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))))
-			}
-			return nil
-		})
-		mustCheck(db)
+	// Cause a split.
+	db.Update(func(tx *Tx) error {
+		var b = tx.Bucket([]byte("widgets"))
+		for i := 0; i < 10000; i++ {
+			assert.NoError(t, b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))))
+		}
+		return nil
+	})
+	db.MustCheck()
 
-		// Insert into widgets/foo/baz.
-		db.Update(func(tx *Tx) error {
-			var b = tx.Bucket([]byte("widgets"))
-			assert.NoError(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")))
-			return nil
-		})
-		mustCheck(db)
+	// Insert into widgets/foo/baz.
+	db.Update(func(tx *Tx) error {
+		var b = tx.Bucket([]byte("widgets"))
+		assert.NoError(t, b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")))
+		return nil
+	})
+	db.MustCheck()
 
-		// Verify.
-		db.View(func(tx *Tx) error {
-			var b = tx.Bucket([]byte("widgets"))
-			assert.Equal(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz")))
-			assert.Equal(t, []byte("xxxx"), b.Get([]byte("bar")))
-			for i := 0; i < 10000; i++ {
-				assert.Equal(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i))))
-			}
-			return nil
-		})
+	// Verify.
+	db.View(func(tx *Tx) error {
+		var b = tx.Bucket([]byte("widgets"))
+		assert.Equal(t, []byte("yyyy"), b.Bucket([]byte("foo")).Get([]byte("baz")))
+		assert.Equal(t, []byte("xxxx"), b.Get([]byte("bar")))
+		for i := 0; i < 10000; i++ {
+			assert.Equal(t, []byte(strconv.Itoa(i)), b.Get([]byte(strconv.Itoa(i))))
+		}
+		return nil
 	})
 }
 
 // Ensure that deleting a bucket using Delete() returns an error.
 func TestBucket_Delete_Bucket(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			b := tx.Bucket([]byte("widgets"))
-			_, err := b.CreateBucket([]byte("foo"))
-			assert.NoError(t, err)
-			assert.Equal(t, ErrIncompatibleValue, b.Delete([]byte("foo")))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		_, err := b.CreateBucket([]byte("foo"))
+		assert.NoError(t, err)
+		assert.Equal(t, ErrIncompatibleValue, b.Delete([]byte("foo")))
+		return nil
 	})
 }
 
 // Ensure that deleting a key on a read-only bucket returns an error.
 func TestBucket_Delete_ReadOnly(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			return nil
-		})
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			err := b.Delete([]byte("foo"))
-			assert.Equal(t, err, ErrTxNotWritable)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		return nil
+	})
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		err := b.Delete([]byte("foo"))
+		assert.Equal(t, err, ErrTxNotWritable)
+		return nil
 	})
 }
 
 // Ensure that a deleting value while the transaction is closed returns an error.
 func TestBucket_Delete_Closed(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		tx, _ := db.Begin(true)
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		tx.Rollback()
-		assert.Equal(t, ErrTxClosed, b.Delete([]byte("foo")))
-	})
+	db := NewTestDB()
+	defer db.Close()
+	tx, _ := db.Begin(true)
+	tx.CreateBucket([]byte("widgets"))
+	b := tx.Bucket([]byte("widgets"))
+	tx.Rollback()
+	assert.Equal(t, ErrTxClosed, b.Delete([]byte("foo")))
 }
 
 // Ensure that deleting a bucket causes nested buckets to be deleted.
 func TestBucket_DeleteBucket_Nested(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-			assert.NoError(t, err)
-			_, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar"))
-			assert.NoError(t, err)
-			assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
-			assert.NoError(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
+		assert.NoError(t, err)
+		_, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar"))
+		assert.NoError(t, err)
+		assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
+		assert.NoError(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
+		return nil
 	})
 }
 
 // Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
 func TestBucket_DeleteBucket_Nested2(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-			assert.NoError(t, err)
-			_, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar"))
-			assert.NoError(t, err)
-			assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
-			return nil
-		})
-		db.Update(func(tx *Tx) error {
-			assert.NotNil(t, tx.Bucket([]byte("widgets")))
-			assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")))
-			assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")))
-			assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz")))
-			assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
-			return nil
-		})
-		db.View(func(tx *Tx) error {
-			assert.Nil(t, tx.Bucket([]byte("widgets")))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		_, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
+		assert.NoError(t, err)
+		_, err = tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar"))
+		assert.NoError(t, err)
+		assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
+		return nil
+	})
+	db.Update(func(tx *Tx) error {
+		assert.NotNil(t, tx.Bucket([]byte("widgets")))
+		assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")))
+		assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")))
+		assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz")))
+		assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
+		return nil
+	})
+	db.View(func(tx *Tx) error {
+		assert.Nil(t, tx.Bucket([]byte("widgets")))
+		return nil
 	})
 }
 
 // Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
 func TestBucket_DeleteBucket_Large(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			assert.NoError(t, err)
-			_, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-			assert.NoError(t, err)
-			b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))
-			for i := 0; i < 1000; i++ {
-				assert.NoError(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))))
-			}
-			return nil
-		})
-		db.Update(func(tx *Tx) error {
-			assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
-			return nil
-		})
-
-		// NOTE: Consistency check in withOpenDB() will error if pages not freed properly.
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		assert.NoError(t, err)
+		_, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
+		assert.NoError(t, err)
+		b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))
+		for i := 0; i < 1000; i++ {
+			assert.NoError(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))))
+		}
+		return nil
 	})
+	db.Update(func(tx *Tx) error {
+		assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
+		return nil
+	})
+
+	// NOTE: Consistency check in TestDB.Close() will panic if pages not freed properly.
 }
 
 // Ensure that a simple value retrieved via Bucket() returns a nil.
 func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-			assert.Nil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+		assert.Nil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")))
+		return nil
 	})
 }
 
 // Ensure that creating a bucket on an existing non-bucket key returns an error.
 func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			assert.NoError(t, err)
-			assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-			_, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
-			assert.Equal(t, ErrIncompatibleValue, err)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		assert.NoError(t, err)
+		assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+		_, err = tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo"))
+		assert.Equal(t, ErrIncompatibleValue, err)
+		return nil
 	})
 }
 
 // Ensure that deleting a bucket on an existing non-bucket key returns an error.
 func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			assert.NoError(t, err)
-			assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-			assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		assert.NoError(t, err)
+		assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+		assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
+		return nil
 	})
 }
 
 // Ensure that a bucket can return an autoincrementing sequence.
 func TestBucket_NextSequence(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.CreateBucket([]byte("woojits"))
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.CreateBucket([]byte("woojits"))
 
-			// Make sure sequence increments.
-			seq, err := tx.Bucket([]byte("widgets")).NextSequence()
-			assert.NoError(t, err)
-			assert.Equal(t, seq, uint64(1))
-			seq, err = tx.Bucket([]byte("widgets")).NextSequence()
-			assert.NoError(t, err)
-			assert.Equal(t, seq, uint64(2))
+		// Make sure sequence increments.
+		seq, err := tx.Bucket([]byte("widgets")).NextSequence()
+		assert.NoError(t, err)
+		assert.Equal(t, seq, uint64(1))
+		seq, err = tx.Bucket([]byte("widgets")).NextSequence()
+		assert.NoError(t, err)
+		assert.Equal(t, seq, uint64(2))
 
-			// Buckets should be separate.
-			seq, err = tx.Bucket([]byte("woojits")).NextSequence()
-			assert.NoError(t, err)
-			assert.Equal(t, seq, uint64(1))
-			return nil
-		})
+		// Buckets should be separate.
+		seq, err = tx.Bucket([]byte("woojits")).NextSequence()
+		assert.NoError(t, err)
+		assert.Equal(t, seq, uint64(1))
+		return nil
 	})
 }
 
 // Ensure that retrieving the next sequence on a read-only bucket returns an error.
 func TestBucket_NextSequence_ReadOnly(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			return nil
-		})
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			i, err := b.NextSequence()
-			assert.Equal(t, i, uint64(0))
-			assert.Equal(t, err, ErrTxNotWritable)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		return nil
+	})
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		i, err := b.NextSequence()
+		assert.Equal(t, i, uint64(0))
+		assert.Equal(t, err, ErrTxNotWritable)
+		return nil
 	})
 }
 
 // Ensure that retrieving the next sequence for a bucket on a closed database return an error.
 func TestBucket_NextSequence_Closed(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		tx, _ := db.Begin(true)
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		tx.Rollback()
-		_, err := b.NextSequence()
-		assert.Equal(t, ErrTxClosed, err)
-	})
+	db := NewTestDB()
+	defer db.Close()
+	tx, _ := db.Begin(true)
+	tx.CreateBucket([]byte("widgets"))
+	b := tx.Bucket([]byte("widgets"))
+	tx.Rollback()
+	_, err := b.NextSequence()
+	assert.Equal(t, ErrTxClosed, err)
 }
 
 // Ensure a user can loop over all key/value pairs in a bucket.
 func TestBucket_ForEach(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
-			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001"))
-			tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002"))
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
+		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001"))
+		tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002"))
 
-			var index int
-			err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
-				switch index {
-				case 0:
-					assert.Equal(t, k, []byte("bar"))
-					assert.Equal(t, v, []byte("0002"))
-				case 1:
-					assert.Equal(t, k, []byte("baz"))
-					assert.Equal(t, v, []byte("0001"))
-				case 2:
-					assert.Equal(t, k, []byte("foo"))
-					assert.Equal(t, v, []byte("0000"))
-				}
-				index++
-				return nil
-			})
-			assert.NoError(t, err)
-			assert.Equal(t, index, 3)
+		var index int
+		err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
+			switch index {
+			case 0:
+				assert.Equal(t, k, []byte("bar"))
+				assert.Equal(t, v, []byte("0002"))
+			case 1:
+				assert.Equal(t, k, []byte("baz"))
+				assert.Equal(t, v, []byte("0001"))
+			case 2:
+				assert.Equal(t, k, []byte("foo"))
+				assert.Equal(t, v, []byte("0000"))
+			}
+			index++
 			return nil
 		})
+		assert.NoError(t, err)
+		assert.Equal(t, index, 3)
+		return nil
 	})
 }
 
 // Ensure a database can stop iteration early.
 func TestBucket_ForEach_ShortCircuit(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000"))
-			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000"))
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000"))
+		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000"))
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
 
-			var index int
-			err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
-				index++
-				if bytes.Equal(k, []byte("baz")) {
-					return errors.New("marker")
-				}
-				return nil
-			})
-			assert.Equal(t, errors.New("marker"), err)
-			assert.Equal(t, 2, index)
+		var index int
+		err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
+			index++
+			if bytes.Equal(k, []byte("baz")) {
+				return errors.New("marker")
+			}
 			return nil
 		})
+		assert.Equal(t, errors.New("marker"), err)
+		assert.Equal(t, 2, index)
+		return nil
 	})
 }
 
 // Ensure that looping over a bucket on a closed database returns an error.
 func TestBucket_ForEach_Closed(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		tx, _ := db.Begin(true)
-		tx.CreateBucket([]byte("widgets"))
-		b := tx.Bucket([]byte("widgets"))
-		tx.Rollback()
-		err := b.ForEach(func(k, v []byte) error { return nil })
-		assert.Equal(t, ErrTxClosed, err)
-	})
+	db := NewTestDB()
+	defer db.Close()
+	tx, _ := db.Begin(true)
+	tx.CreateBucket([]byte("widgets"))
+	b := tx.Bucket([]byte("widgets"))
+	tx.Rollback()
+	err := b.ForEach(func(k, v []byte) error { return nil })
+	assert.Equal(t, ErrTxClosed, err)
 }
 
 // Ensure that an error is returned when inserting with an empty key.
 func TestBucket_Put_EmptyKey(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar"))
-			assert.Equal(t, err, ErrKeyRequired)
-			err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar"))
-			assert.Equal(t, err, ErrKeyRequired)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar"))
+		assert.Equal(t, err, ErrKeyRequired)
+		err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar"))
+		assert.Equal(t, err, ErrKeyRequired)
+		return nil
 	})
 }
 
 // Ensure that an error is returned when inserting with a key that's too large.
 func TestBucket_Put_KeyTooLarge(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar"))
-			assert.Equal(t, err, ErrKeyTooLarge)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar"))
+		assert.Equal(t, err, ErrKeyTooLarge)
+		return nil
 	})
 }
 
 // Ensure a bucket can calculate stats.
 func TestBucket_Stats(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		// Add bucket with fewer keys but one big value.
-		big_key := []byte("really-big-value")
-		for i := 0; i < 500; i++ {
-			db.Update(func(tx *Tx) error {
-				b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
-				return b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i)))
-			})
-		}
+	db := NewTestDB()
+	defer db.Close()
+
+	// Add bucket with fewer keys but one big value.
+	big_key := []byte("really-big-value")
+	for i := 0; i < 500; i++ {
 		db.Update(func(tx *Tx) error {
 			b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
-			return b.Put(big_key, []byte(strings.Repeat("*", 10000)))
+			return b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i)))
 		})
+	}
+	db.Update(func(tx *Tx) error {
+		b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
+		return b.Put(big_key, []byte(strings.Repeat("*", 10000)))
+	})
 
-		mustCheck(db)
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("woojits"))
-			stats := b.Stats()
-			assert.Equal(t, 1, stats.BranchPageN, "BranchPageN")
-			assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
-			assert.Equal(t, 7, stats.LeafPageN, "LeafPageN")
-			assert.Equal(t, 2, stats.LeafOverflowN, "LeafOverflowN")
-			assert.Equal(t, 501, stats.KeyN, "KeyN")
-			assert.Equal(t, 2, stats.Depth, "Depth")
+	db.MustCheck()
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("woojits"))
+		stats := b.Stats()
+		assert.Equal(t, 1, stats.BranchPageN, "BranchPageN")
+		assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
+		assert.Equal(t, 7, stats.LeafPageN, "LeafPageN")
+		assert.Equal(t, 2, stats.LeafOverflowN, "LeafOverflowN")
+		assert.Equal(t, 501, stats.KeyN, "KeyN")
+		assert.Equal(t, 2, stats.Depth, "Depth")
 
-			branchInuse := pageHeaderSize            // branch page header
-			branchInuse += 7 * branchPageElementSize // branch elements
-			branchInuse += 7 * 3                     // branch keys (6 3-byte keys)
-			assert.Equal(t, branchInuse, stats.BranchInuse, "BranchInuse")
+		branchInuse := pageHeaderSize            // branch page header
+		branchInuse += 7 * branchPageElementSize // branch elements
+		branchInuse += 7 * 3                     // branch keys (7 3-byte keys)
+		assert.Equal(t, branchInuse, stats.BranchInuse, "BranchInuse")
 
-			leafInuse := 7 * pageHeaderSize          // leaf page header
-			leafInuse += 501 * leafPageElementSize   // leaf elements
-			leafInuse += 500*3 + len(big_key)        // leaf keys
-			leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values
-			assert.Equal(t, leafInuse, stats.LeafInuse, "LeafInuse")
+		leafInuse := 7 * pageHeaderSize          // leaf page header
+		leafInuse += 501 * leafPageElementSize   // leaf elements
+		leafInuse += 500*3 + len(big_key)        // leaf keys
+		leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values
+		assert.Equal(t, leafInuse, stats.LeafInuse, "LeafInuse")
 
-			if os.Getpagesize() == 4096 {
-				// Incompatible page size
-				assert.Equal(t, 4096, stats.BranchAlloc, "BranchAlloc")
-				assert.Equal(t, 36864, stats.LeafAlloc, "LeafAlloc")
-			}
+		if os.Getpagesize() == 4096 {
+			// Only check allocation stats on systems with 4KB pages.
+			assert.Equal(t, 4096, stats.BranchAlloc, "BranchAlloc")
+			assert.Equal(t, 36864, stats.LeafAlloc, "LeafAlloc")
+		}
 
-			assert.Equal(t, 1, stats.BucketN, "BucketN")
-			assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN")
-			assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse")
-			return nil
-		})
+		assert.Equal(t, 1, stats.BucketN, "BucketN")
+		assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN")
+		assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse")
+		return nil
 	})
 }
 
@@ -668,179 +670,179 @@ func TestBucket_Stats(t *testing.T) {
 func TestBucket_Stats_RandomFill(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
-	}
-	if os.Getpagesize() != 4096 {
+	} else if os.Getpagesize() != 4096 {
 		t.Skip("invalid page size for test")
 	}
 
-	withOpenDB(func(db *DB, path string) {
-		// Add a set of values in random order. It will be the same random
-		// order so we can maintain consistency between test runs.
-		var count int
-		r := rand.New(rand.NewSource(42))
-		for _, i := range r.Perm(1000) {
-			db.Update(func(tx *Tx) error {
-				b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
-				b.FillPercent = 0.9
-				for _, j := range r.Perm(100) {
-					index := (j * 10000) + i
-					b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000"))
-					count++
-				}
-				return nil
-			})
-		}
-		mustCheck(db)
+	db := NewTestDB()
+	defer db.Close()
 
-		db.View(func(tx *Tx) error {
-			s := tx.Bucket([]byte("woojits")).Stats()
-			assert.Equal(t, 100000, s.KeyN, "KeyN")
-
-			assert.Equal(t, 98, s.BranchPageN, "BranchPageN")
-			assert.Equal(t, 0, s.BranchOverflowN, "BranchOverflowN")
-			assert.Equal(t, 130984, s.BranchInuse, "BranchInuse")
-			assert.Equal(t, 401408, s.BranchAlloc, "BranchAlloc")
-
-			assert.Equal(t, 3412, s.LeafPageN, "LeafPageN")
-			assert.Equal(t, 0, s.LeafOverflowN, "LeafOverflowN")
-			assert.Equal(t, 4742482, s.LeafInuse, "LeafInuse")
-			assert.Equal(t, 13975552, s.LeafAlloc, "LeafAlloc")
+	// Add a set of values in random order. It will be the same random
+	// order so we can maintain consistency between test runs.
+	var count int
+	r := rand.New(rand.NewSource(42))
+	for _, i := range r.Perm(1000) {
+		db.Update(func(tx *Tx) error {
+			b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
+			b.FillPercent = 0.9
+			for _, j := range r.Perm(100) {
+				index := (j * 10000) + i
+				b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000"))
+				count++
+			}
 			return nil
 		})
+	}
+	db.MustCheck()
+
+	db.View(func(tx *Tx) error {
+		s := tx.Bucket([]byte("woojits")).Stats()
+		assert.Equal(t, 100000, s.KeyN, "KeyN")
+
+		assert.Equal(t, 98, s.BranchPageN, "BranchPageN")
+		assert.Equal(t, 0, s.BranchOverflowN, "BranchOverflowN")
+		assert.Equal(t, 130984, s.BranchInuse, "BranchInuse")
+		assert.Equal(t, 401408, s.BranchAlloc, "BranchAlloc")
+
+		assert.Equal(t, 3412, s.LeafPageN, "LeafPageN")
+		assert.Equal(t, 0, s.LeafOverflowN, "LeafOverflowN")
+		assert.Equal(t, 4742482, s.LeafInuse, "LeafInuse")
+		assert.Equal(t, 13975552, s.LeafAlloc, "LeafAlloc")
+		return nil
 	})
 }
 
 // Ensure a bucket can calculate stats.
 func TestBucket_Stats_Small(t *testing.T) {
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		// Add a bucket that fits on a single root leaf.
+		b, err := tx.CreateBucket([]byte("whozawhats"))
+		assert.NoError(t, err)
+		b.Put([]byte("foo"), []byte("bar"))
 
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			// Add a bucket that fits on a single root leaf.
-			b, err := tx.CreateBucket([]byte("whozawhats"))
-			assert.NoError(t, err)
-			b.Put([]byte("foo"), []byte("bar"))
-
-			return nil
-		})
-		mustCheck(db)
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("whozawhats"))
-			stats := b.Stats()
-			assert.Equal(t, 0, stats.BranchPageN, "BranchPageN")
-			assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
-			assert.Equal(t, 0, stats.LeafPageN, "LeafPageN")
-			assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN")
-			assert.Equal(t, 1, stats.KeyN, "KeyN")
-			assert.Equal(t, 1, stats.Depth, "Depth")
-			assert.Equal(t, 0, stats.BranchInuse, "BranchInuse")
-			assert.Equal(t, 0, stats.LeafInuse, "LeafInuse")
-			if os.Getpagesize() == 4096 {
-				// Incompatible page size
-				assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc")
-				assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc")
-			}
-			assert.Equal(t, 1, stats.BucketN, "BucketN")
-			assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN")
-			assert.Equal(t, pageHeaderSize+leafPageElementSize+6, stats.InlineBucketInuse, "InlineBucketInuse")
-			return nil
-		})
+		return nil
+	})
+	db.MustCheck()
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("whozawhats"))
+		stats := b.Stats()
+		assert.Equal(t, 0, stats.BranchPageN, "BranchPageN")
+		assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
+		assert.Equal(t, 0, stats.LeafPageN, "LeafPageN")
+		assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN")
+		assert.Equal(t, 1, stats.KeyN, "KeyN")
+		assert.Equal(t, 1, stats.Depth, "Depth")
+		assert.Equal(t, 0, stats.BranchInuse, "BranchInuse")
+		assert.Equal(t, 0, stats.LeafInuse, "LeafInuse")
+		if os.Getpagesize() == 4096 {
+			// Only check allocation stats on systems with 4KB pages.
+			assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc")
+			assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc")
+		}
+		assert.Equal(t, 1, stats.BucketN, "BucketN")
+		assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN")
+		assert.Equal(t, pageHeaderSize+leafPageElementSize+6, stats.InlineBucketInuse, "InlineBucketInuse")
+		return nil
 	})
 }
 
 func TestBucket_Stats_EmptyBucket(t *testing.T) {
+	db := NewTestDB()
+	defer db.Close()
 
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			// Add a bucket that fits on a single root leaf.
-			_, err := tx.CreateBucket([]byte("whozawhats"))
-			assert.NoError(t, err)
-			return nil
-		})
-		mustCheck(db)
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("whozawhats"))
-			stats := b.Stats()
-			assert.Equal(t, 0, stats.BranchPageN, "BranchPageN")
-			assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
-			assert.Equal(t, 0, stats.LeafPageN, "LeafPageN")
-			assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN")
-			assert.Equal(t, 0, stats.KeyN, "KeyN")
-			assert.Equal(t, 1, stats.Depth, "Depth")
-			assert.Equal(t, 0, stats.BranchInuse, "BranchInuse")
-			assert.Equal(t, 0, stats.LeafInuse, "LeafInuse")
-			if os.Getpagesize() == 4096 {
-				// Incompatible page size
-				assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc")
-				assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc")
-			}
-			assert.Equal(t, 1, stats.BucketN, "BucketN")
-			assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN")
-			assert.Equal(t, pageHeaderSize, stats.InlineBucketInuse, "InlineBucketInuse")
-			return nil
-		})
+	db.Update(func(tx *Tx) error {
+		// Add a bucket that fits on a single root leaf.
+		_, err := tx.CreateBucket([]byte("whozawhats"))
+		assert.NoError(t, err)
+		return nil
+	})
+	db.MustCheck()
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("whozawhats"))
+		stats := b.Stats()
+		assert.Equal(t, 0, stats.BranchPageN, "BranchPageN")
+		assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
+		assert.Equal(t, 0, stats.LeafPageN, "LeafPageN")
+		assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN")
+		assert.Equal(t, 0, stats.KeyN, "KeyN")
+		assert.Equal(t, 1, stats.Depth, "Depth")
+		assert.Equal(t, 0, stats.BranchInuse, "BranchInuse")
+		assert.Equal(t, 0, stats.LeafInuse, "LeafInuse")
+		if os.Getpagesize() == 4096 {
+			// Only check allocation stats on systems with 4KB pages.
+			assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc")
+			assert.Equal(t, 0, stats.LeafAlloc, "LeafAlloc")
+		}
+		assert.Equal(t, 1, stats.BucketN, "BucketN")
+		assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN")
+		assert.Equal(t, pageHeaderSize, stats.InlineBucketInuse, "InlineBucketInuse")
+		return nil
 	})
 }
 
 // Ensure a bucket can calculate stats.
 func TestBucket_Stats_Nested(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			b, err := tx.CreateBucket([]byte("foo"))
-			assert.NoError(t, err)
-			for i := 0; i < 100; i++ {
-				b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i)))
-			}
-			bar, err := b.CreateBucket([]byte("bar"))
-			assert.NoError(t, err)
-			for i := 0; i < 10; i++ {
-				bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
-			}
-			baz, err := bar.CreateBucket([]byte("baz"))
-			assert.NoError(t, err)
-			for i := 0; i < 10; i++ {
-				baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
-			}
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
 
-		mustCheck(db)
+	db.Update(func(tx *Tx) error {
+		b, err := tx.CreateBucket([]byte("foo"))
+		assert.NoError(t, err)
+		for i := 0; i < 100; i++ {
+			b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i)))
+		}
+		bar, err := b.CreateBucket([]byte("bar"))
+		assert.NoError(t, err)
+		for i := 0; i < 10; i++ {
+			bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
+		}
+		baz, err := bar.CreateBucket([]byte("baz"))
+		assert.NoError(t, err)
+		for i := 0; i < 10; i++ {
+			baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
+		}
+		return nil
+	})
 
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("foo"))
-			stats := b.Stats()
-			assert.Equal(t, 0, stats.BranchPageN, "BranchPageN")
-			assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
-			assert.Equal(t, 2, stats.LeafPageN, "LeafPageN")
-			assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN")
-			assert.Equal(t, 122, stats.KeyN, "KeyN")
-			assert.Equal(t, 3, stats.Depth, "Depth")
-			assert.Equal(t, 0, stats.BranchInuse, "BranchInuse")
+	db.MustCheck()
 
-			foo := pageHeaderSize            // foo
-			foo += 101 * leafPageElementSize // foo leaf elements
-			foo += 100*2 + 100*2             // foo leaf key/values
-			foo += 3 + bucketHeaderSize      // foo -> bar key/value
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("foo"))
+		stats := b.Stats()
+		assert.Equal(t, 0, stats.BranchPageN, "BranchPageN")
+		assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
+		assert.Equal(t, 2, stats.LeafPageN, "LeafPageN")
+		assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN")
+		assert.Equal(t, 122, stats.KeyN, "KeyN")
+		assert.Equal(t, 3, stats.Depth, "Depth")
+		assert.Equal(t, 0, stats.BranchInuse, "BranchInuse")
 
-			bar := pageHeaderSize           // bar
-			bar += 11 * leafPageElementSize // bar leaf elements
-			bar += 10 + 10                  // bar leaf key/values
-			bar += 3 + bucketHeaderSize     // bar -> baz key/value
+		foo := pageHeaderSize            // foo
+		foo += 101 * leafPageElementSize // foo leaf elements
+		foo += 100*2 + 100*2             // foo leaf key/values
+		foo += 3 + bucketHeaderSize      // foo -> bar key/value
 
-			baz := pageHeaderSize           // baz (inline)
-			baz += 10 * leafPageElementSize // baz leaf elements
-			baz += 10 + 10                  // baz leaf key/values
+		bar := pageHeaderSize           // bar
+		bar += 11 * leafPageElementSize // bar leaf elements
+		bar += 10 + 10                  // bar leaf key/values
+		bar += 3 + bucketHeaderSize     // bar -> baz key/value
 
-			assert.Equal(t, foo+bar+baz, stats.LeafInuse, "LeafInuse")
-			if os.Getpagesize() == 4096 {
-				// Incompatible page size
-				assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc")
-				assert.Equal(t, 8192, stats.LeafAlloc, "LeafAlloc")
-			}
-			assert.Equal(t, 3, stats.BucketN, "BucketN")
-			assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN")
-			assert.Equal(t, baz, stats.InlineBucketInuse, "InlineBucketInuse")
-			return nil
-		})
+		baz := pageHeaderSize           // baz (inline)
+		baz += 10 * leafPageElementSize // baz leaf elements
+		baz += 10 + 10                  // baz leaf key/values
+
+		assert.Equal(t, foo+bar+baz, stats.LeafInuse, "LeafInuse")
+		if os.Getpagesize() == 4096 {
+			// Only check allocation stats on systems with 4KB pages.
+			assert.Equal(t, 0, stats.BranchAlloc, "BranchAlloc")
+			assert.Equal(t, 8192, stats.LeafAlloc, "LeafAlloc")
+		}
+		assert.Equal(t, 3, stats.BucketN, "BucketN")
+		assert.Equal(t, 1, stats.InlineBucketN, "InlineBucketN")
+		assert.Equal(t, baz, stats.InlineBucketInuse, "InlineBucketInuse")
+		return nil
 	})
 }
 
@@ -850,42 +852,43 @@ func TestBucket_Stats_Large(t *testing.T) {
 		t.Skip("skipping test in short mode.")
 	}
 
-	withOpenDB(func(db *DB, path string) {
-		var index int
-		for i := 0; i < 100; i++ {
-			db.Update(func(tx *Tx) error {
-				// Add bucket with lots of keys.
-				b, _ := tx.CreateBucketIfNotExists([]byte("widgets"))
-				for i := 0; i < 1000; i++ {
-					b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index)))
-					index++
-				}
-				return nil
-			})
-		}
-		mustCheck(db)
+	db := NewTestDB()
+	defer db.Close()
 
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			stats := b.Stats()
-			assert.Equal(t, 13, stats.BranchPageN, "BranchPageN")
-			assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
-			assert.Equal(t, 1196, stats.LeafPageN, "LeafPageN")
-			assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN")
-			assert.Equal(t, 100000, stats.KeyN, "KeyN")
-			assert.Equal(t, 3, stats.Depth, "Depth")
-			assert.Equal(t, 25257, stats.BranchInuse, "BranchInuse")
-			assert.Equal(t, 2596916, stats.LeafInuse, "LeafInuse")
-			if os.Getpagesize() == 4096 {
-				// Incompatible page size
-				assert.Equal(t, 53248, stats.BranchAlloc, "BranchAlloc")
-				assert.Equal(t, 4898816, stats.LeafAlloc, "LeafAlloc")
+	var index int
+	for i := 0; i < 100; i++ {
+		db.Update(func(tx *Tx) error {
+			// Add bucket with lots of keys.
+			b, _ := tx.CreateBucketIfNotExists([]byte("widgets"))
+			for i := 0; i < 1000; i++ {
+				b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index)))
+				index++
 			}
-			assert.Equal(t, 1, stats.BucketN, "BucketN")
-			assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN")
-			assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse")
 			return nil
 		})
+	}
+	db.MustCheck()
+
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		stats := b.Stats()
+		assert.Equal(t, 13, stats.BranchPageN, "BranchPageN")
+		assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
+		assert.Equal(t, 1196, stats.LeafPageN, "LeafPageN")
+		assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN")
+		assert.Equal(t, 100000, stats.KeyN, "KeyN")
+		assert.Equal(t, 3, stats.Depth, "Depth")
+		assert.Equal(t, 25257, stats.BranchInuse, "BranchInuse")
+		assert.Equal(t, 2596916, stats.LeafInuse, "LeafInuse")
+		if os.Getpagesize() == 4096 {
+			// Only check allocation stats on systems with 4KB pages.
+			assert.Equal(t, 53248, stats.BranchAlloc, "BranchAlloc")
+			assert.Equal(t, 4898816, stats.LeafAlloc, "LeafAlloc")
+		}
+		assert.Equal(t, 1, stats.BucketN, "BucketN")
+		assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN")
+		assert.Equal(t, 0, stats.InlineBucketInuse, "InlineBucketInuse")
+		return nil
 	})
 }
 
@@ -897,37 +900,39 @@ func TestBucket_Put_Single(t *testing.T) {
 
 	index := 0
 	f := func(items testdata) bool {
-		withOpenDB(func(db *DB, path string) {
-			m := make(map[string][]byte)
+		db := NewTestDB()
+		defer db.Close()
 
-			db.Update(func(tx *Tx) error {
-				_, err := tx.CreateBucket([]byte("widgets"))
-				return err
-			})
-			for _, item := range items {
-				db.Update(func(tx *Tx) error {
-					if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil {
-						panic("put error: " + err.Error())
-					}
-					m[string(item.Key)] = item.Value
-					return nil
-				})
+		m := make(map[string][]byte)
 
-				// Verify all key/values so far.
-				db.View(func(tx *Tx) error {
-					i := 0
-					for k, v := range m {
-						value := tx.Bucket([]byte("widgets")).Get([]byte(k))
-						if !bytes.Equal(value, v) {
-							t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
-							copyAndFailNow(t, db)
-						}
-						i++
-					}
-					return nil
-				})
-			}
+		db.Update(func(tx *Tx) error {
+			_, err := tx.CreateBucket([]byte("widgets"))
+			return err
 		})
+		for _, item := range items {
+			db.Update(func(tx *Tx) error {
+				if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil {
+					panic("put error: " + err.Error())
+				}
+				m[string(item.Key)] = item.Value
+				return nil
+			})
+
+			// Verify all key/values so far.
+			db.View(func(tx *Tx) error {
+				i := 0
+				for k, v := range m {
+					value := tx.Bucket([]byte("widgets")).Get([]byte(k))
+					if !bytes.Equal(value, v) {
+						t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
+						db.CopyTempFile()
+						t.FailNow()
+					}
+					i++
+				}
+				return nil
+			})
+		}
 
 		index++
 		return true
@@ -944,32 +949,33 @@ func TestBucket_Put_Multiple(t *testing.T) {
 	}
 
 	f := func(items testdata) bool {
-		withOpenDB(func(db *DB, path string) {
-			// Bulk insert all values.
-			db.Update(func(tx *Tx) error {
-				_, err := tx.CreateBucket([]byte("widgets"))
-				return err
-			})
-			err := db.Update(func(tx *Tx) error {
-				b := tx.Bucket([]byte("widgets"))
-				for _, item := range items {
-					assert.NoError(t, b.Put(item.Key, item.Value))
-				}
-				return nil
-			})
-			assert.NoError(t, err)
+		db := NewTestDB()
+		defer db.Close()
+		// Bulk insert all values.
+		db.Update(func(tx *Tx) error {
+			_, err := tx.CreateBucket([]byte("widgets"))
+			return err
+		})
+		err := db.Update(func(tx *Tx) error {
+			b := tx.Bucket([]byte("widgets"))
+			for _, item := range items {
+				assert.NoError(t, b.Put(item.Key, item.Value))
+			}
+			return nil
+		})
+		assert.NoError(t, err)
 
-			// Verify all items exist.
-			db.View(func(tx *Tx) error {
-				b := tx.Bucket([]byte("widgets"))
-				for _, item := range items {
-					value := b.Get(item.Key)
-					if !assert.Equal(t, item.Value, value) {
-						copyAndFailNow(t, db)
-					}
+		// Verify all items exist.
+		db.View(func(tx *Tx) error {
+			b := tx.Bucket([]byte("widgets"))
+			for _, item := range items {
+				value := b.Get(item.Key)
+				if !assert.Equal(t, item.Value, value) {
+					db.CopyTempFile()
+					t.FailNow()
 				}
-				return nil
-			})
+			}
+			return nil
 		})
 		return true
 	}
@@ -985,37 +991,37 @@ func TestBucket_Delete_Quick(t *testing.T) {
 	}
 
 	f := func(items testdata) bool {
-		withOpenDB(func(db *DB, path string) {
-			// Bulk insert all values.
-			db.Update(func(tx *Tx) error {
-				_, err := tx.CreateBucket([]byte("widgets"))
-				return err
-			})
+		db := NewTestDB()
+		defer db.Close()
+		// Bulk insert all values.
+		db.Update(func(tx *Tx) error {
+			_, err := tx.CreateBucket([]byte("widgets"))
+			return err
+		})
+		err := db.Update(func(tx *Tx) error {
+			b := tx.Bucket([]byte("widgets"))
+			for _, item := range items {
+				assert.NoError(t, b.Put(item.Key, item.Value))
+			}
+			return nil
+		})
+		assert.NoError(t, err)
+
+		// Remove items one at a time and check consistency.
+		for _, item := range items {
 			err := db.Update(func(tx *Tx) error {
-				b := tx.Bucket([]byte("widgets"))
-				for _, item := range items {
-					assert.NoError(t, b.Put(item.Key, item.Value))
-				}
-				return nil
+				return tx.Bucket([]byte("widgets")).Delete(item.Key)
 			})
 			assert.NoError(t, err)
+		}
 
-			// Remove items one at a time and check consistency.
-			for _, item := range items {
-				err := db.Update(func(tx *Tx) error {
-					return tx.Bucket([]byte("widgets")).Delete(item.Key)
-				})
-				assert.NoError(t, err)
-			}
-
-			// Anything before our deletion index should be nil.
-			db.View(func(tx *Tx) error {
-				tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
-					t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3))
-					return nil
-				})
+		// Anything before our deletion index should be nil.
+		db.View(func(tx *Tx) error {
+			tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
+				t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3))
 				return nil
 			})
+			return nil
 		})
 		return true
 	}
diff --git a/cursor_test.go b/cursor_test.go
index 470860d..424d254 100644
--- a/cursor_test.go
+++ b/cursor_test.go
@@ -12,98 +12,99 @@ import (
 
 // Ensure that a cursor can return a reference to the bucket that created it.
 func TestCursor_Bucket(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			b, _ := tx.CreateBucket([]byte("widgets"))
-			c := b.Cursor()
-			assert.Equal(t, b, c.Bucket())
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		b, _ := tx.CreateBucket([]byte("widgets"))
+		c := b.Cursor()
+		assert.Equal(t, b, c.Bucket())
+		return nil
 	})
 }
 
 // Ensure that a Tx cursor can seek to the appropriate keys.
 func TestCursor_Seek(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			b, err := tx.CreateBucket([]byte("widgets"))
-			assert.NoError(t, err)
-			assert.NoError(t, b.Put([]byte("foo"), []byte("0001")))
-			assert.NoError(t, b.Put([]byte("bar"), []byte("0002")))
-			assert.NoError(t, b.Put([]byte("baz"), []byte("0003")))
-			_, err = b.CreateBucket([]byte("bkt"))
-			assert.NoError(t, err)
-			return nil
-		})
-		db.View(func(tx *Tx) error {
-			c := tx.Bucket([]byte("widgets")).Cursor()
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		b, err := tx.CreateBucket([]byte("widgets"))
+		assert.NoError(t, err)
+		assert.NoError(t, b.Put([]byte("foo"), []byte("0001")))
+		assert.NoError(t, b.Put([]byte("bar"), []byte("0002")))
+		assert.NoError(t, b.Put([]byte("baz"), []byte("0003")))
+		_, err = b.CreateBucket([]byte("bkt"))
+		assert.NoError(t, err)
+		return nil
+	})
+	db.View(func(tx *Tx) error {
+		c := tx.Bucket([]byte("widgets")).Cursor()
 
-			// Exact match should go to the key.
-			k, v := c.Seek([]byte("bar"))
-			assert.Equal(t, []byte("bar"), k)
-			assert.Equal(t, []byte("0002"), v)
+		// Exact match should go to the key.
+		k, v := c.Seek([]byte("bar"))
+		assert.Equal(t, []byte("bar"), k)
+		assert.Equal(t, []byte("0002"), v)
 
-			// Inexact match should go to the next key.
-			k, v = c.Seek([]byte("bas"))
-			assert.Equal(t, []byte("baz"), k)
-			assert.Equal(t, []byte("0003"), v)
+		// Inexact match should go to the next key.
+		k, v = c.Seek([]byte("bas"))
+		assert.Equal(t, []byte("baz"), k)
+		assert.Equal(t, []byte("0003"), v)
 
-			// Low key should go to the first key.
-			k, v = c.Seek([]byte(""))
-			assert.Equal(t, []byte("bar"), k)
-			assert.Equal(t, []byte("0002"), v)
+		// Low key should go to the first key.
+		k, v = c.Seek([]byte(""))
+		assert.Equal(t, []byte("bar"), k)
+		assert.Equal(t, []byte("0002"), v)
 
-			// High key should return no key.
-			k, v = c.Seek([]byte("zzz"))
-			assert.Nil(t, k)
-			assert.Nil(t, v)
+		// High key should return no key.
+		k, v = c.Seek([]byte("zzz"))
+		assert.Nil(t, k)
+		assert.Nil(t, v)
 
-			// Buckets should return their key but no value.
-			k, v = c.Seek([]byte("bkt"))
-			assert.Equal(t, []byte("bkt"), k)
-			assert.Nil(t, v)
+		// Buckets should return their key but no value.
+		k, v = c.Seek([]byte("bkt"))
+		assert.Equal(t, []byte("bkt"), k)
+		assert.Nil(t, v)
 
-			return nil
-		})
+		return nil
 	})
 }
 
 func TestCursor_Delete(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		var count = 1000
+	db := NewTestDB()
+	defer db.Close()
 
-		// Insert every other key between 0 and $count.
-		db.Update(func(tx *Tx) error {
-			b, _ := tx.CreateBucket([]byte("widgets"))
-			for i := 0; i < count; i += 1 {
-				k := make([]byte, 8)
-				binary.BigEndian.PutUint64(k, uint64(i))
-				b.Put(k, make([]byte, 100))
+	var count = 1000
+
+	// Insert every other key between 0 and $count.
+	db.Update(func(tx *Tx) error {
+		b, _ := tx.CreateBucket([]byte("widgets"))
+		for i := 0; i < count; i += 1 {
+			k := make([]byte, 8)
+			binary.BigEndian.PutUint64(k, uint64(i))
+			b.Put(k, make([]byte, 100))
+		}
+		b.CreateBucket([]byte("sub"))
+		return nil
+	})
+
+	db.Update(func(tx *Tx) error {
+		c := tx.Bucket([]byte("widgets")).Cursor()
+		bound := make([]byte, 8)
+		binary.BigEndian.PutUint64(bound, uint64(count/2))
+		for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
+			if err := c.Delete(); err != nil {
+				return err
 			}
-			b.CreateBucket([]byte("sub"))
-			return nil
-		})
+		}
+		c.Seek([]byte("sub"))
+		err := c.Delete()
+		assert.Equal(t, err, ErrIncompatibleValue)
+		return nil
+	})
 
-		db.Update(func(tx *Tx) error {
-			c := tx.Bucket([]byte("widgets")).Cursor()
-			bound := make([]byte, 8)
-			binary.BigEndian.PutUint64(bound, uint64(count/2))
-			for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
-				if err := c.Delete(); err != nil {
-					return err
-				}
-			}
-			c.Seek([]byte("sub"))
-			err := c.Delete()
-			assert.Equal(t, err, ErrIncompatibleValue)
-			return nil
-		})
-
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			assert.Equal(t, b.Stats().KeyN, count/2+1)
-			return nil
-		})
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		assert.Equal(t, b.Stats().KeyN, count/2+1)
+		return nil
 	})
 }
 
@@ -113,216 +114,223 @@ func TestCursor_Delete(t *testing.T) {
 //
 // Related: https://github.com/boltdb/bolt/pull/187
 func TestCursor_Seek_Large(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		var count = 10000
+	db := NewTestDB()
+	defer db.Close()
 
-		// Insert every other key between 0 and $count.
-		db.Update(func(tx *Tx) error {
-			b, _ := tx.CreateBucket([]byte("widgets"))
-			for i := 0; i < count; i += 100 {
-				for j := i; j < i+100; j += 2 {
-					k := make([]byte, 8)
-					binary.BigEndian.PutUint64(k, uint64(j))
-					b.Put(k, make([]byte, 100))
-				}
+	var count = 10000
+
+	// Insert every other key between 0 and $count.
+	db.Update(func(tx *Tx) error {
+		b, _ := tx.CreateBucket([]byte("widgets"))
+		for i := 0; i < count; i += 100 {
+			for j := i; j < i+100; j += 2 {
+				k := make([]byte, 8)
+				binary.BigEndian.PutUint64(k, uint64(j))
+				b.Put(k, make([]byte, 100))
 			}
-			return nil
-		})
+		}
+		return nil
+	})
 
-		db.View(func(tx *Tx) error {
-			c := tx.Bucket([]byte("widgets")).Cursor()
-			for i := 0; i < count; i++ {
-				seek := make([]byte, 8)
-				binary.BigEndian.PutUint64(seek, uint64(i))
+	db.View(func(tx *Tx) error {
+		c := tx.Bucket([]byte("widgets")).Cursor()
+		for i := 0; i < count; i++ {
+			seek := make([]byte, 8)
+			binary.BigEndian.PutUint64(seek, uint64(i))
 
-				k, _ := c.Seek(seek)
+			k, _ := c.Seek(seek)
 
-				// The last seek is beyond the end of the the range so
-				// it should return nil.
-				if i == count-1 {
-					assert.Nil(t, k)
-					continue
-				}
-
-				// Otherwise we should seek to the exact key or the next key.
-				num := binary.BigEndian.Uint64(k)
-				if i%2 == 0 {
-					assert.Equal(t, uint64(i), num)
-				} else {
-					assert.Equal(t, uint64(i+1), num)
-				}
+			// The last seek is beyond the end of the range so
+			// it should return nil.
+			if i == count-1 {
+				assert.Nil(t, k)
+				continue
 			}
 
-			return nil
-		})
+			// Otherwise we should seek to the exact key or the next key.
+			num := binary.BigEndian.Uint64(k)
+			if i%2 == 0 {
+				assert.Equal(t, uint64(i), num)
+			} else {
+				assert.Equal(t, uint64(i+1), num)
+			}
+		}
+
+		return nil
 	})
 }
 
 // Ensure that a cursor can iterate over an empty bucket without error.
 func TestCursor_EmptyBucket(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			return err
-		})
-		db.View(func(tx *Tx) error {
-			c := tx.Bucket([]byte("widgets")).Cursor()
-			k, v := c.First()
-			assert.Nil(t, k)
-			assert.Nil(t, v)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		return err
+	})
+	db.View(func(tx *Tx) error {
+		c := tx.Bucket([]byte("widgets")).Cursor()
+		k, v := c.First()
+		assert.Nil(t, k)
+		assert.Nil(t, v)
+		return nil
 	})
 }
 
 // Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
 func TestCursor_EmptyBucketReverse(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			return err
-		})
-		db.View(func(tx *Tx) error {
-			c := tx.Bucket([]byte("widgets")).Cursor()
-			k, v := c.Last()
-			assert.Nil(t, k)
-			assert.Nil(t, v)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+
+	db.Update(func(tx *Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		return err
+	})
+	db.View(func(tx *Tx) error {
+		c := tx.Bucket([]byte("widgets")).Cursor()
+		k, v := c.Last()
+		assert.Nil(t, k)
+		assert.Nil(t, v)
+		return nil
 	})
 }
 
 // Ensure that a Tx cursor can iterate over a single root with a couple elements.
 func TestCursor_Iterate_Leaf(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
-			tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
-			return nil
-		})
-		tx, _ := db.Begin(false)
-		c := tx.Bucket([]byte("widgets")).Cursor()
+	db := NewTestDB()
+	defer db.Close()
 
-		k, v := c.First()
-		assert.Equal(t, string(k), "bar")
-		assert.Equal(t, v, []byte{1})
-
-		k, v = c.Next()
-		assert.Equal(t, string(k), "baz")
-		assert.Equal(t, v, []byte{})
-
-		k, v = c.Next()
-		assert.Equal(t, string(k), "foo")
-		assert.Equal(t, v, []byte{0})
-
-		k, v = c.Next()
-		assert.Nil(t, k)
-		assert.Nil(t, v)
-
-		k, v = c.Next()
-		assert.Nil(t, k)
-		assert.Nil(t, v)
-
-		tx.Rollback()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
+		tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
+		return nil
 	})
+	tx, _ := db.Begin(false)
+	c := tx.Bucket([]byte("widgets")).Cursor()
+
+	k, v := c.First()
+	assert.Equal(t, string(k), "bar")
+	assert.Equal(t, v, []byte{1})
+
+	k, v = c.Next()
+	assert.Equal(t, string(k), "baz")
+	assert.Equal(t, v, []byte{})
+
+	k, v = c.Next()
+	assert.Equal(t, string(k), "foo")
+	assert.Equal(t, v, []byte{0})
+
+	k, v = c.Next()
+	assert.Nil(t, k)
+	assert.Nil(t, v)
+
+	k, v = c.Next()
+	assert.Nil(t, k)
+	assert.Nil(t, v)
+
+	tx.Rollback()
 }
 
 // Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
 func TestCursor_LeafRootReverse(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
-			tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
-			return nil
-		})
-		tx, _ := db.Begin(false)
-		c := tx.Bucket([]byte("widgets")).Cursor()
+	db := NewTestDB()
+	defer db.Close()
 
-		k, v := c.Last()
-		assert.Equal(t, string(k), "foo")
-		assert.Equal(t, v, []byte{0})
-
-		k, v = c.Prev()
-		assert.Equal(t, string(k), "baz")
-		assert.Equal(t, v, []byte{})
-
-		k, v = c.Prev()
-		assert.Equal(t, string(k), "bar")
-		assert.Equal(t, v, []byte{1})
-
-		k, v = c.Prev()
-		assert.Nil(t, k)
-		assert.Nil(t, v)
-
-		k, v = c.Prev()
-		assert.Nil(t, k)
-		assert.Nil(t, v)
-
-		tx.Rollback()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
+		tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
+		return nil
 	})
+	tx, _ := db.Begin(false)
+	c := tx.Bucket([]byte("widgets")).Cursor()
+
+	k, v := c.Last()
+	assert.Equal(t, string(k), "foo")
+	assert.Equal(t, v, []byte{0})
+
+	k, v = c.Prev()
+	assert.Equal(t, string(k), "baz")
+	assert.Equal(t, v, []byte{})
+
+	k, v = c.Prev()
+	assert.Equal(t, string(k), "bar")
+	assert.Equal(t, v, []byte{1})
+
+	k, v = c.Prev()
+	assert.Nil(t, k)
+	assert.Nil(t, v)
+
+	k, v = c.Prev()
+	assert.Nil(t, k)
+	assert.Nil(t, v)
+
+	tx.Rollback()
 }
 
 // Ensure that a Tx cursor can restart from the beginning.
 func TestCursor_Restart(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
 
-		tx, _ := db.Begin(false)
-		c := tx.Bucket([]byte("widgets")).Cursor()
-
-		k, _ := c.First()
-		assert.Equal(t, string(k), "bar")
-
-		k, _ = c.Next()
-		assert.Equal(t, string(k), "foo")
-
-		k, _ = c.First()
-		assert.Equal(t, string(k), "bar")
-
-		k, _ = c.Next()
-		assert.Equal(t, string(k), "foo")
-
-		tx.Rollback()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
+		return nil
 	})
+
+	tx, _ := db.Begin(false)
+	c := tx.Bucket([]byte("widgets")).Cursor()
+
+	k, _ := c.First()
+	assert.Equal(t, string(k), "bar")
+
+	k, _ = c.Next()
+	assert.Equal(t, string(k), "foo")
+
+	k, _ = c.First()
+	assert.Equal(t, string(k), "bar")
+
+	k, _ = c.Next()
+	assert.Equal(t, string(k), "foo")
+
+	tx.Rollback()
 }
 
 // Ensure that a Tx can iterate over all elements in a bucket.
 func TestCursor_QuickCheck(t *testing.T) {
 	f := func(items testdata) bool {
-		withOpenDB(func(db *DB, path string) {
-			// Bulk insert all values.
-			tx, _ := db.Begin(true)
-			tx.CreateBucket([]byte("widgets"))
-			b := tx.Bucket([]byte("widgets"))
-			for _, item := range items {
-				assert.NoError(t, b.Put(item.Key, item.Value))
-			}
-			assert.NoError(t, tx.Commit())
+		db := NewTestDB()
+		defer db.Close()
 
-			// Sort test data.
-			sort.Sort(items)
+		// Bulk insert all values.
+		tx, _ := db.Begin(true)
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		for _, item := range items {
+			assert.NoError(t, b.Put(item.Key, item.Value))
+		}
+		assert.NoError(t, tx.Commit())
+
+		// Sort test data.
+		sort.Sort(items)
+
+		// Iterate over all items and check consistency.
+		var index = 0
+		tx, _ = db.Begin(false)
+		c := tx.Bucket([]byte("widgets")).Cursor()
+		for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
+			assert.Equal(t, k, items[index].Key)
+			assert.Equal(t, v, items[index].Value)
+			index++
+		}
+		assert.Equal(t, len(items), index)
+		tx.Rollback()
 
-			// Iterate over all items and check consistency.
-			var index = 0
-			tx, _ = db.Begin(false)
-			c := tx.Bucket([]byte("widgets")).Cursor()
-			for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
-				assert.Equal(t, k, items[index].Key)
-				assert.Equal(t, v, items[index].Value)
-				index++
-			}
-			assert.Equal(t, len(items), index)
-			tx.Rollback()
-		})
 		return true
 	}
 	if err := quick.Check(f, qconfig()); err != nil {
@@ -333,31 +341,33 @@ func TestCursor_QuickCheck(t *testing.T) {
 // Ensure that a transaction can iterate over all elements in a bucket in reverse.
 func TestCursor_QuickCheck_Reverse(t *testing.T) {
 	f := func(items testdata) bool {
-		withOpenDB(func(db *DB, path string) {
-			// Bulk insert all values.
-			tx, _ := db.Begin(true)
-			tx.CreateBucket([]byte("widgets"))
-			b := tx.Bucket([]byte("widgets"))
-			for _, item := range items {
-				assert.NoError(t, b.Put(item.Key, item.Value))
-			}
-			assert.NoError(t, tx.Commit())
+		db := NewTestDB()
+		defer db.Close()
 
-			// Sort test data.
-			sort.Sort(revtestdata(items))
+		// Bulk insert all values.
+		tx, _ := db.Begin(true)
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		for _, item := range items {
+			assert.NoError(t, b.Put(item.Key, item.Value))
+		}
+		assert.NoError(t, tx.Commit())
+
+		// Sort test data.
+		sort.Sort(revtestdata(items))
+
+		// Iterate over all items and check consistency.
+		var index = 0
+		tx, _ = db.Begin(false)
+		c := tx.Bucket([]byte("widgets")).Cursor()
+		for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
+			assert.Equal(t, k, items[index].Key)
+			assert.Equal(t, v, items[index].Value)
+			index++
+		}
+		assert.Equal(t, len(items), index)
+		tx.Rollback()
 
-			// Iterate over all items and check consistency.
-			var index = 0
-			tx, _ = db.Begin(false)
-			c := tx.Bucket([]byte("widgets")).Cursor()
-			for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
-				assert.Equal(t, k, items[index].Key)
-				assert.Equal(t, v, items[index].Value)
-				index++
-			}
-			assert.Equal(t, len(items), index)
-			tx.Rollback()
-		})
 		return true
 	}
 	if err := quick.Check(f, qconfig()); err != nil {
@@ -367,54 +377,56 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
 
 // Ensure that a Tx cursor can iterate over subbuckets.
 func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			b, err := tx.CreateBucket([]byte("widgets"))
-			assert.NoError(t, err)
-			_, err = b.CreateBucket([]byte("foo"))
-			assert.NoError(t, err)
-			_, err = b.CreateBucket([]byte("bar"))
-			assert.NoError(t, err)
-			_, err = b.CreateBucket([]byte("baz"))
-			assert.NoError(t, err)
-			return nil
-		})
-		db.View(func(tx *Tx) error {
-			var names []string
-			c := tx.Bucket([]byte("widgets")).Cursor()
-			for k, v := c.First(); k != nil; k, v = c.Next() {
-				names = append(names, string(k))
-				assert.Nil(t, v)
-			}
-			assert.Equal(t, names, []string{"bar", "baz", "foo"})
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+
+	db.Update(func(tx *Tx) error {
+		b, err := tx.CreateBucket([]byte("widgets"))
+		assert.NoError(t, err)
+		_, err = b.CreateBucket([]byte("foo"))
+		assert.NoError(t, err)
+		_, err = b.CreateBucket([]byte("bar"))
+		assert.NoError(t, err)
+		_, err = b.CreateBucket([]byte("baz"))
+		assert.NoError(t, err)
+		return nil
+	})
+	db.View(func(tx *Tx) error {
+		var names []string
+		c := tx.Bucket([]byte("widgets")).Cursor()
+		for k, v := c.First(); k != nil; k, v = c.Next() {
+			names = append(names, string(k))
+			assert.Nil(t, v)
+		}
+		assert.Equal(t, names, []string{"bar", "baz", "foo"})
+		return nil
 	})
 }
 
 // Ensure that a Tx cursor can reverse iterate over subbuckets.
 func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			b, err := tx.CreateBucket([]byte("widgets"))
-			assert.NoError(t, err)
-			_, err = b.CreateBucket([]byte("foo"))
-			assert.NoError(t, err)
-			_, err = b.CreateBucket([]byte("bar"))
-			assert.NoError(t, err)
-			_, err = b.CreateBucket([]byte("baz"))
-			assert.NoError(t, err)
-			return nil
-		})
-		db.View(func(tx *Tx) error {
-			var names []string
-			c := tx.Bucket([]byte("widgets")).Cursor()
-			for k, v := c.Last(); k != nil; k, v = c.Prev() {
-				names = append(names, string(k))
-				assert.Nil(t, v)
-			}
-			assert.Equal(t, names, []string{"foo", "baz", "bar"})
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+
+	db.Update(func(tx *Tx) error {
+		b, err := tx.CreateBucket([]byte("widgets"))
+		assert.NoError(t, err)
+		_, err = b.CreateBucket([]byte("foo"))
+		assert.NoError(t, err)
+		_, err = b.CreateBucket([]byte("bar"))
+		assert.NoError(t, err)
+		_, err = b.CreateBucket([]byte("baz"))
+		assert.NoError(t, err)
+		return nil
+	})
+	db.View(func(tx *Tx) error {
+		var names []string
+		c := tx.Bucket([]byte("widgets")).Cursor()
+		for k, v := c.Last(); k != nil; k, v = c.Prev() {
+			names = append(names, string(k))
+			assert.Nil(t, v)
+		}
+		assert.Equal(t, names, []string{"foo", "baz", "bar"})
+		return nil
 	})
 }
diff --git a/db_test.go b/db_test.go
index b38be93..7044359 100644
--- a/db_test.go
+++ b/db_test.go
@@ -206,14 +206,14 @@ func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
 
 // Ensure that a read-write transaction can be retrieved.
 func TestDB_BeginRW(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		tx, err := db.Begin(true)
-		assert.NotNil(t, tx)
-		assert.NoError(t, err)
-		assert.Equal(t, tx.DB(), db)
-		assert.Equal(t, tx.Writable(), true)
-		assert.NoError(t, tx.Commit())
-	})
+	db := NewTestDB()
+	defer db.Close()
+	tx, err := db.Begin(true)
+	assert.NotNil(t, tx)
+	assert.NoError(t, err)
+	assert.Equal(t, tx.DB(), db)
+	assert.Equal(t, tx.Writable(), true)
+	assert.NoError(t, tx.Commit())
 }
 
 // Ensure that opening a transaction while the DB is closed returns an error.
@@ -226,23 +226,23 @@ func TestDB_BeginRW_Closed(t *testing.T) {
 
 // Ensure a database can provide a transactional block.
 func TestDB_Update(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		err := db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			b := tx.Bucket([]byte("widgets"))
-			b.Put([]byte("foo"), []byte("bar"))
-			b.Put([]byte("baz"), []byte("bat"))
-			b.Delete([]byte("foo"))
-			return nil
-		})
-		assert.NoError(t, err)
-		err = db.View(func(tx *Tx) error {
-			assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
-			assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
-			return nil
-		})
-		assert.NoError(t, err)
+	db := NewTestDB()
+	defer db.Close()
+	err := db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		b.Put([]byte("foo"), []byte("bar"))
+		b.Put([]byte("baz"), []byte("bat"))
+		b.Delete([]byte("foo"))
+		return nil
 	})
+	assert.NoError(t, err)
+	err = db.View(func(tx *Tx) error {
+		assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+		assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
+		return nil
+	})
+	assert.NoError(t, err)
 }
 
 // Ensure a closed database returns an error while running a transaction block
@@ -273,69 +273,70 @@ func TestDB_Update_ManualCommitAndRollback(t *testing.T) {
 
 // Ensure a write transaction that panics does not hold open locks.
 func TestDB_Update_Panic(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		func() {
-			defer func() {
-				if r := recover(); r != nil {
-					warn("recover: update", r)
-				}
-			}()
-			db.Update(func(tx *Tx) error {
-				tx.CreateBucket([]byte("widgets"))
-				panic("omg")
-			})
+	db := NewTestDB()
+	defer db.Close()
+
+	func() {
+		defer func() {
+			if r := recover(); r != nil {
+				warn("recover: update", r)
+			}
 		}()
-
-		// Verify we can update again.
-		err := db.Update(func(tx *Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			return err
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
+			panic("omg")
 		})
-		assert.NoError(t, err)
+	}()
 
-		// Verify that our change persisted.
-		err = db.Update(func(tx *Tx) error {
-			assert.NotNil(t, tx.Bucket([]byte("widgets")))
-			return nil
-		})
+	// Verify we can update again.
+	err := db.Update(func(tx *Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		return err
+	})
+	assert.NoError(t, err)
+
+	// Verify that our change persisted.
+	err = db.Update(func(tx *Tx) error {
+		assert.NotNil(t, tx.Bucket([]byte("widgets")))
+		return nil
 	})
 }
 
 // Ensure a database can return an error through a read-only transactional block.
 func TestDB_View_Error(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		err := db.View(func(tx *Tx) error {
-			return errors.New("xxx")
-		})
-		assert.Equal(t, errors.New("xxx"), err)
+	db := NewTestDB()
+	defer db.Close()
+	err := db.View(func(tx *Tx) error {
+		return errors.New("xxx")
 	})
+	assert.Equal(t, errors.New("xxx"), err)
 }
 
 // Ensure a read transaction that panics does not hold open locks.
 func TestDB_View_Panic(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		return nil
+	})
 
-		func() {
-			defer func() {
-				if r := recover(); r != nil {
-					warn("recover: view", r)
-				}
-			}()
-			db.View(func(tx *Tx) error {
-				assert.NotNil(t, tx.Bucket([]byte("widgets")))
-				panic("omg")
-			})
+	func() {
+		defer func() {
+			if r := recover(); r != nil {
+				warn("recover: view", r)
+			}
 		}()
-
-		// Verify that we can still use read transactions.
 		db.View(func(tx *Tx) error {
 			assert.NotNil(t, tx.Bucket([]byte("widgets")))
-			return nil
+			panic("omg")
 		})
+	}()
+
+	// Verify that we can still use read transactions.
+	db.View(func(tx *Tx) error {
+		assert.NotNil(t, tx.Bucket([]byte("widgets")))
+		return nil
 	})
 }
 
@@ -346,16 +347,16 @@ func TestDB_Commit_WriteFail(t *testing.T) {
 
 // Ensure that DB stats can be returned.
 func TestDB_Stats(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			return err
-		})
-		stats := db.Stats()
-		assert.Equal(t, 2, stats.TxStats.PageCount, "PageCount")
-		assert.Equal(t, 0, stats.FreePageN, "FreePageN")
-		assert.Equal(t, 2, stats.PendingPageN, "PendingPageN")
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		return err
 	})
+	stats := db.Stats()
+	assert.Equal(t, 2, stats.TxStats.PageCount, "PageCount")
+	assert.Equal(t, 0, stats.FreePageN, "FreePageN")
+	assert.Equal(t, 2, stats.PendingPageN, "PendingPageN")
 }
 
 // Ensure that the mmap grows appropriately.
@@ -373,41 +374,41 @@ func TestDB_mmapSize(t *testing.T) {
 
 // Ensure that database pages are in expected order and type.
 func TestDB_Consistency(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			_, err := tx.CreateBucket([]byte("widgets"))
-			return err
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		_, err := tx.CreateBucket([]byte("widgets"))
+		return err
+	})
 
-		for i := 0; i < 10; i++ {
-			db.Update(func(tx *Tx) error {
-				assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
-				return nil
-			})
-		}
+	for i := 0; i < 10; i++ {
 		db.Update(func(tx *Tx) error {
-			if p, _ := tx.Page(0); assert.NotNil(t, p) {
-				assert.Equal(t, "meta", p.Type)
-			}
-			if p, _ := tx.Page(1); assert.NotNil(t, p) {
-				assert.Equal(t, "meta", p.Type)
-			}
-			if p, _ := tx.Page(2); assert.NotNil(t, p) {
-				assert.Equal(t, "free", p.Type)
-			}
-			if p, _ := tx.Page(3); assert.NotNil(t, p) {
-				assert.Equal(t, "free", p.Type)
-			}
-			if p, _ := tx.Page(4); assert.NotNil(t, p) {
-				assert.Equal(t, "leaf", p.Type) // root leaf
-			}
-			if p, _ := tx.Page(5); assert.NotNil(t, p) {
-				assert.Equal(t, "freelist", p.Type)
-			}
-			p, _ := tx.Page(6)
-			assert.Nil(t, p)
+			assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
 			return nil
 		})
+	}
+	db.Update(func(tx *Tx) error {
+		if p, _ := tx.Page(0); assert.NotNil(t, p) {
+			assert.Equal(t, "meta", p.Type)
+		}
+		if p, _ := tx.Page(1); assert.NotNil(t, p) {
+			assert.Equal(t, "meta", p.Type)
+		}
+		if p, _ := tx.Page(2); assert.NotNil(t, p) {
+			assert.Equal(t, "free", p.Type)
+		}
+		if p, _ := tx.Page(3); assert.NotNil(t, p) {
+			assert.Equal(t, "free", p.Type)
+		}
+		if p, _ := tx.Page(4); assert.NotNil(t, p) {
+			assert.Equal(t, "leaf", p.Type) // root leaf
+		}
+		if p, _ := tx.Page(5); assert.NotNil(t, p) {
+			assert.Equal(t, "freelist", p.Type)
+		}
+		p, _ := tx.Page(6)
+		assert.Nil(t, p)
+		return nil
 	})
 }
 
@@ -451,16 +452,17 @@ func TestDB_StrictMode(t *testing.T) {
 			msg = fmt.Sprintf("%s", recover())
 		}()
 
-		withOpenDB(func(db *DB, path string) {
-			db.StrictMode = true
-			db.Update(func(tx *Tx) error {
-				tx.CreateBucket([]byte("foo"))
+		db := NewTestDB()
+		defer db.Close()
 
-				// Corrupt the DB by extending the high water mark.
-				tx.meta.pgid++
+		db.StrictMode = true
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("foo"))
 
-				return nil
-			})
+			// Corrupt the DB by extending the high water mark.
+			tx.meta.pgid++
+
+			return nil
 		})
 	}()
 
@@ -474,15 +476,18 @@ func TestDB_DoubleFree(t *testing.T) {
 		defer func() {
 			msg = fmt.Sprintf("%s", recover())
 		}()
-		withOpenDB(func(db *DB, path string) {
-			db.Update(func(tx *Tx) error {
-				tx.CreateBucket([]byte("foo"))
 
-				// Corrupt the DB by adding a page to the freelist.
-				db.freelist.free(0, tx.page(3))
+		db := NewTestDB()
+		defer os.Remove(db.DB.Path())
+		defer db.DB.Close()
 
-				return nil
-			})
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("foo"))
+
+			// Corrupt the DB by adding a page to the freelist.
+			db.freelist.free(0, tx.page(3))
+
+			return nil
 		})
 	}()
 
@@ -580,37 +585,53 @@ func ExampleDB_Begin_ReadOnly() {
 	// zephyr likes purple
 }
 
-// tempfile returns a temporary file path.
-func tempfile() string {
-	f, _ := ioutil.TempFile("", "bolt-")
-	f.Close()
-	os.Remove(f.Name())
-	return f.Name()
+// TestDB represents a wrapper around a Bolt DB to handle temporary file
+// creation and automatic cleanup on close.
+type TestDB struct {
+	*DB
 }
 
-// withOpenDB executes a function with an already opened database.
-func withOpenDB(fn func(*DB, string)) {
-	path := tempfile()
-	defer os.Remove(path)
-
-	db, err := Open(path, 0666, nil)
+// NewTestDB returns a new instance of TestDB.
+func NewTestDB() *TestDB {
+	db, err := Open(tempfile(), 0666, nil)
 	if err != nil {
 		panic("cannot open db: " + err.Error())
 	}
-	defer db.Close()
-	fn(db, path)
+	return &TestDB{db}
+}
 
+// Close closes the database and deletes the underlying file.
+func (db *TestDB) Close() {
 	// Log statistics.
 	if *statsFlag {
-		logStats(db)
+		db.PrintStats()
 	}
 
 	// Check database consistency after every test.
-	mustCheck(db)
+	db.MustCheck()
+
+	// Close database and remove file.
+	defer os.Remove(db.Path())
+	db.DB.Close()
 }
 
-// mustCheck runs a consistency check on the database and panics if any errors are found.
-func mustCheck(db *DB) {
+// PrintStats prints the database stats
+func (db *TestDB) PrintStats() {
+	var stats = db.Stats()
+	fmt.Printf("[db] %-20s %-20s %-20s\n",
+		fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
+		fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
+		fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
+	)
+	fmt.Printf("     %-20s %-20s %-20s\n",
+		fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
+		fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
+		fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
+	)
+}
+
+// MustCheck runs a consistency check on the database and panics if any errors are found.
+func (db *TestDB) MustCheck() {
 	db.View(func(tx *Tx) error {
 		// Collect all the errors.
 		var errors []error
@@ -643,6 +664,21 @@ func mustCheck(db *DB) {
 	})
 }
 
+// CopyTempFile copies a database to a temporary file.
+func (db *TestDB) CopyTempFile() {
+	path := tempfile()
+	db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) })
+	fmt.Println("db copied to: ", path)
+}
+
+// tempfile returns a temporary file path.
+func tempfile() string {
+	f, _ := ioutil.TempFile("", "bolt-")
+	f.Close()
+	os.Remove(f.Name())
+	return f.Name()
+}
+
 // mustContainKeys checks that a bucket contains a given set of keys.
 func mustContainKeys(b *Bucket, m map[string]string) {
 	found := make(map[string]string)
@@ -682,29 +718,6 @@ func trunc(b []byte, length int) []byte {
 	return b
 }
 
-// writes the current database stats to the testing log.
-func logStats(db *DB) {
-	var stats = db.Stats()
-	fmt.Printf("[db] %-20s %-20s %-20s\n",
-		fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
-		fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
-		fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
-	)
-	fmt.Printf("     %-20s %-20s %-20s\n",
-		fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
-		fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
-		fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
-	)
-}
-
 func truncDuration(d time.Duration) string {
 	return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
 }
-
-// copyAndFailNow copies a database to a new location and then fails then test.
-func copyAndFailNow(t *testing.T, db *DB) {
-	path := tempfile()
-	db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) })
-	fmt.Println("db copied to: ", path)
-	t.FailNow()
-}
diff --git a/simulation_test.go b/simulation_test.go
index 482349f..021d6db 100644
--- a/simulation_test.go
+++ b/simulation_test.go
@@ -41,78 +41,80 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
 
 	var versions = make(map[txid]*QuickDB)
 	versions[1] = NewQuickDB()
-	withOpenDB(func(db *DB, path string) {
-		var mutex sync.Mutex
 
-		// Run n threads in parallel, each with their own operation.
-		var wg sync.WaitGroup
-		var threads = make(chan bool, parallelism)
-		var i int
-		for {
-			threads <- true
-			wg.Add(1)
-			writable := ((rand.Int() % 100) < 20) // 20% writers
+	db := NewTestDB()
+	defer db.Close()
 
-			// Choose an operation to execute.
-			var handler simulateHandler
-			if writable {
-				handler = writerHandlers[rand.Intn(len(writerHandlers))]
-			} else {
-				handler = readerHandlers[rand.Intn(len(readerHandlers))]
-			}
+	var mutex sync.Mutex
 
-			// Execute a thread for the given operation.
-			go func(writable bool, handler simulateHandler) {
-				defer wg.Done()
+	// Run n threads in parallel, each with their own operation.
+	var wg sync.WaitGroup
+	var threads = make(chan bool, parallelism)
+	var i int
+	for {
+		threads <- true
+		wg.Add(1)
+		writable := ((rand.Int() % 100) < 20) // 20% writers
 
-				// Start transaction.
-				tx, err := db.Begin(writable)
-				if err != nil {
-					t.Fatal("tx begin: ", err)
-				}
-
-				// Obtain current state of the dataset.
-				mutex.Lock()
-				var qdb = versions[tx.id()]
-				if writable {
-					qdb = versions[tx.id()-1].Copy()
-				}
-				mutex.Unlock()
-
-				// Make sure we commit/rollback the tx at the end and update the state.
-				if writable {
-					defer func() {
-						mutex.Lock()
-						versions[tx.id()] = qdb
-						mutex.Unlock()
-
-						assert.NoError(t, tx.Commit())
-					}()
-				} else {
-					defer tx.Rollback()
-				}
-
-				// Ignore operation if we don't have data yet.
-				if qdb == nil {
-					return
-				}
-
-				// Execute handler.
-				handler(tx, qdb)
-
-				// Release a thread back to the scheduling loop.
-				<-threads
-			}(writable, handler)
-
-			i++
-			if i > threadCount {
-				break
-			}
+		// Choose an operation to execute.
+		var handler simulateHandler
+		if writable {
+			handler = writerHandlers[rand.Intn(len(writerHandlers))]
+		} else {
+			handler = readerHandlers[rand.Intn(len(readerHandlers))]
 		}
 
-		// Wait until all threads are done.
-		wg.Wait()
-	})
+		// Execute a thread for the given operation.
+		go func(writable bool, handler simulateHandler) {
+			defer wg.Done()
+
+			// Start transaction.
+			tx, err := db.Begin(writable)
+			if err != nil {
+				t.Fatal("tx begin: ", err)
+			}
+
+			// Obtain current state of the dataset.
+			mutex.Lock()
+			var qdb = versions[tx.id()]
+			if writable {
+				qdb = versions[tx.id()-1].Copy()
+			}
+			mutex.Unlock()
+
+			// Make sure we commit/rollback the tx at the end and update the state.
+			if writable {
+				defer func() {
+					mutex.Lock()
+					versions[tx.id()] = qdb
+					mutex.Unlock()
+
+					assert.NoError(t, tx.Commit())
+				}()
+			} else {
+				defer tx.Rollback()
+			}
+
+			// Ignore operation if we don't have data yet.
+			if qdb == nil {
+				return
+			}
+
+			// Execute handler.
+			handler(tx, qdb)
+
+			// Release a thread back to the scheduling loop.
+			<-threads
+		}(writable, handler)
+
+		i++
+		if i > threadCount {
+			break
+		}
+	}
+
+	// Wait until all threads are done.
+	wg.Wait()
 }
 
 type simulateHandler func(tx *Tx, qdb *QuickDB)
diff --git a/tx_test.go b/tx_test.go
index 0528c0d..55a31ea 100644
--- a/tx_test.go
+++ b/tx_test.go
@@ -11,265 +11,267 @@ import (
 
 // Ensure that committing a closed transaction returns an error.
 func TestTx_Commit_Closed(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		tx, _ := db.Begin(true)
-		tx.CreateBucket([]byte("foo"))
-		assert.NoError(t, tx.Commit())
-		assert.Equal(t, tx.Commit(), ErrTxClosed)
-	})
+	db := NewTestDB()
+	defer db.Close()
+	tx, _ := db.Begin(true)
+	tx.CreateBucket([]byte("foo"))
+	assert.NoError(t, tx.Commit())
+	assert.Equal(t, tx.Commit(), ErrTxClosed)
 }
 
 // Ensure that rolling back a closed transaction returns an error.
 func TestTx_Rollback_Closed(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		tx, _ := db.Begin(true)
-		assert.NoError(t, tx.Rollback())
-		assert.Equal(t, tx.Rollback(), ErrTxClosed)
-	})
+	db := NewTestDB()
+	defer db.Close()
+	tx, _ := db.Begin(true)
+	assert.NoError(t, tx.Rollback())
+	assert.Equal(t, tx.Rollback(), ErrTxClosed)
 }
 
 // Ensure that committing a read-only transaction returns an error.
 func TestTx_Commit_ReadOnly(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		tx, _ := db.Begin(false)
-		assert.Equal(t, tx.Commit(), ErrTxNotWritable)
-	})
+	db := NewTestDB()
+	defer db.Close()
+	tx, _ := db.Begin(false)
+	assert.Equal(t, tx.Commit(), ErrTxNotWritable)
 }
 
 // Ensure that a transaction can retrieve a cursor on the root bucket.
 func TestTx_Cursor(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.CreateBucket([]byte("woojits"))
-			c := tx.Cursor()
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.CreateBucket([]byte("woojits"))
+		c := tx.Cursor()
 
-			k, v := c.First()
-			assert.Equal(t, "widgets", string(k))
-			assert.Nil(t, v)
+		k, v := c.First()
+		assert.Equal(t, "widgets", string(k))
+		assert.Nil(t, v)
 
-			k, v = c.Next()
-			assert.Equal(t, "woojits", string(k))
-			assert.Nil(t, v)
+		k, v = c.Next()
+		assert.Equal(t, "woojits", string(k))
+		assert.Nil(t, v)
 
-			k, v = c.Next()
-			assert.Nil(t, k)
-			assert.Nil(t, v)
+		k, v = c.Next()
+		assert.Nil(t, k)
+		assert.Nil(t, v)
 
-			return nil
-		})
+		return nil
 	})
 }
 
 // Ensure that creating a bucket with a read-only transaction returns an error.
 func TestTx_CreateBucket_ReadOnly(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.View(func(tx *Tx) error {
-			b, err := tx.CreateBucket([]byte("foo"))
-			assert.Nil(t, b)
-			assert.Equal(t, ErrTxNotWritable, err)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.View(func(tx *Tx) error {
+		b, err := tx.CreateBucket([]byte("foo"))
+		assert.Nil(t, b)
+		assert.Equal(t, ErrTxNotWritable, err)
+		return nil
 	})
 }
 
 // Ensure that creating a bucket on a closed transaction returns an error.
 func TestTx_CreateBucket_Closed(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		tx, _ := db.Begin(true)
-		tx.Commit()
-		b, err := tx.CreateBucket([]byte("foo"))
-		assert.Nil(t, b)
-		assert.Equal(t, ErrTxClosed, err)
-	})
+	db := NewTestDB()
+	defer db.Close()
+	tx, _ := db.Begin(true)
+	tx.Commit()
+	b, err := tx.CreateBucket([]byte("foo"))
+	assert.Nil(t, b)
+	assert.Equal(t, ErrTxClosed, err)
 }
 
 // Ensure that a Tx can retrieve a bucket.
 func TestTx_Bucket(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			b := tx.Bucket([]byte("widgets"))
-			assert.NotNil(t, b)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		assert.NotNil(t, b)
+		return nil
 	})
 }
 
 // Ensure that a Tx retrieving a non-existent key returns nil.
 func TestTx_Get_Missing(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-			value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
-			assert.Nil(t, value)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+		value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
+		assert.Nil(t, value)
+		return nil
 	})
 }
 
 // Ensure that a bucket can be created and retrieved.
 func TestTx_CreateBucket(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		// Create a bucket.
-		db.Update(func(tx *Tx) error {
-			b, err := tx.CreateBucket([]byte("widgets"))
-			assert.NotNil(t, b)
-			assert.NoError(t, err)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
 
-		// Read the bucket through a separate transaction.
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			assert.NotNil(t, b)
-			return nil
-		})
+	// Create a bucket.
+	db.Update(func(tx *Tx) error {
+		b, err := tx.CreateBucket([]byte("widgets"))
+		assert.NotNil(t, b)
+		assert.NoError(t, err)
+		return nil
+	})
+
+	// Read the bucket through a separate transaction.
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		assert.NotNil(t, b)
+		return nil
 	})
 }
 
 // Ensure that a bucket can be created if it doesn't already exist.
 func TestTx_CreateBucketIfNotExists(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
-			assert.NotNil(t, b)
-			assert.NoError(t, err)
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+		assert.NotNil(t, b)
+		assert.NoError(t, err)
 
-			b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
-			assert.NotNil(t, b)
-			assert.NoError(t, err)
+		b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
+		assert.NotNil(t, b)
+		assert.NoError(t, err)
 
-			b, err = tx.CreateBucketIfNotExists([]byte{})
-			assert.Nil(t, b)
-			assert.Equal(t, ErrBucketNameRequired, err)
+		b, err = tx.CreateBucketIfNotExists([]byte{})
+		assert.Nil(t, b)
+		assert.Equal(t, ErrBucketNameRequired, err)
 
-			b, err = tx.CreateBucketIfNotExists(nil)
-			assert.Nil(t, b)
-			assert.Equal(t, ErrBucketNameRequired, err)
-			return nil
-		})
+		b, err = tx.CreateBucketIfNotExists(nil)
+		assert.Nil(t, b)
+		assert.Equal(t, ErrBucketNameRequired, err)
+		return nil
+	})
 
-		// Read the bucket through a separate transaction.
-		db.View(func(tx *Tx) error {
-			b := tx.Bucket([]byte("widgets"))
-			assert.NotNil(t, b)
-			return nil
-		})
+	// Read the bucket through a separate transaction.
+	db.View(func(tx *Tx) error {
+		b := tx.Bucket([]byte("widgets"))
+		assert.NotNil(t, b)
+		return nil
 	})
 }
 
 // Ensure that a bucket cannot be created twice.
 func TestTx_CreateBucket_Exists(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		// Create a bucket.
-		db.Update(func(tx *Tx) error {
-			b, err := tx.CreateBucket([]byte("widgets"))
-			assert.NotNil(t, b)
-			assert.NoError(t, err)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	// Create a bucket.
+	db.Update(func(tx *Tx) error {
+		b, err := tx.CreateBucket([]byte("widgets"))
+		assert.NotNil(t, b)
+		assert.NoError(t, err)
+		return nil
+	})
 
-		// Create the same bucket again.
-		db.Update(func(tx *Tx) error {
-			b, err := tx.CreateBucket([]byte("widgets"))
-			assert.Nil(t, b)
-			assert.Equal(t, ErrBucketExists, err)
-			return nil
-		})
+	// Create the same bucket again.
+	db.Update(func(tx *Tx) error {
+		b, err := tx.CreateBucket([]byte("widgets"))
+		assert.Nil(t, b)
+		assert.Equal(t, ErrBucketExists, err)
+		return nil
 	})
 }
 
 // Ensure that a bucket is created with a non-blank name.
 func TestTx_CreateBucket_NameRequired(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			b, err := tx.CreateBucket(nil)
-			assert.Nil(t, b)
-			assert.Equal(t, ErrBucketNameRequired, err)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		b, err := tx.CreateBucket(nil)
+		assert.Nil(t, b)
+		assert.Equal(t, ErrBucketNameRequired, err)
+		return nil
 	})
 }
 
 // Ensure that a bucket can be deleted.
 func TestTx_DeleteBucket(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		// Create a bucket and add a value.
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
 
-		// Save root page id.
-		var root pgid
-		db.View(func(tx *Tx) error {
-			root = tx.Bucket([]byte("widgets")).root
-			return nil
-		})
+	// Create a bucket and add a value.
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+		return nil
+	})
 
-		// Delete the bucket and make sure we can't get the value.
-		db.Update(func(tx *Tx) error {
-			assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
-			assert.Nil(t, tx.Bucket([]byte("widgets")))
-			return nil
-		})
+	// Save root page id.
+	var root pgid
+	db.View(func(tx *Tx) error {
+		root = tx.Bucket([]byte("widgets")).root
+		return nil
+	})
 
-		db.Update(func(tx *Tx) error {
-			// Verify that the bucket's page is free.
-			assert.Equal(t, []pgid{4, 5}, db.freelist.all())
+	// Delete the bucket and make sure we can't get the value.
+	db.Update(func(tx *Tx) error {
+		assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
+		assert.Nil(t, tx.Bucket([]byte("widgets")))
+		return nil
+	})
 
-			// Create the bucket again and make sure there's not a phantom value.
-			b, err := tx.CreateBucket([]byte("widgets"))
-			assert.NotNil(t, b)
-			assert.NoError(t, err)
-			assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
-			return nil
-		})
+	db.Update(func(tx *Tx) error {
+		// Verify that the bucket's page is free.
+		assert.Equal(t, []pgid{4, 5}, db.freelist.all())
+
+		// Create the bucket again and make sure there's not a phantom value.
+		b, err := tx.CreateBucket([]byte("widgets"))
+		assert.NotNil(t, b)
+		assert.NoError(t, err)
+		assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+		return nil
 	})
 }
 
 // Ensure that deleting a bucket on a closed transaction returns an error.
 func TestTx_DeleteBucket_Closed(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		tx, _ := db.Begin(true)
-		tx.Commit()
-		assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxClosed)
-	})
+	db := NewTestDB()
+	defer db.Close()
+	tx, _ := db.Begin(true)
+	tx.Commit()
+	assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxClosed)
 }
 
 // Ensure that deleting a bucket with a read-only transaction returns an error.
 func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.View(func(tx *Tx) error {
-			assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxNotWritable)
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.View(func(tx *Tx) error {
+		assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxNotWritable)
+		return nil
 	})
 }
 
 // Ensure that nothing happens when deleting a bucket that doesn't exist.
 func TestTx_DeleteBucket_NotFound(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
+		return nil
 	})
 }
 
 // Ensure that Tx commit handlers are called after a transaction successfully commits.
 func TestTx_OnCommit(t *testing.T) {
 	var x int
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.OnCommit(func() { x += 1 })
-			tx.OnCommit(func() { x += 2 })
-			_, err := tx.CreateBucket([]byte("widgets"))
-			return err
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.OnCommit(func() { x += 1 })
+		tx.OnCommit(func() { x += 2 })
+		_, err := tx.CreateBucket([]byte("widgets"))
+		return err
 	})
 	assert.Equal(t, 3, x)
 }
@@ -277,39 +279,39 @@ func TestTx_OnCommit(t *testing.T) {
 // Ensure that Tx commit handlers are NOT called after a transaction rolls back.
 func TestTx_OnCommit_Rollback(t *testing.T) {
 	var x int
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.OnCommit(func() { x += 1 })
-			tx.OnCommit(func() { x += 2 })
-			tx.CreateBucket([]byte("widgets"))
-			return errors.New("rollback this commit")
-		})
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.OnCommit(func() { x += 1 })
+		tx.OnCommit(func() { x += 2 })
+		tx.CreateBucket([]byte("widgets"))
+		return errors.New("rollback this commit")
 	})
 	assert.Equal(t, 0, x)
 }
 
 // Ensure that the database can be copied to a file path.
 func TestTx_CopyFile(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		var dest = tempfile()
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
-			return nil
-		})
+	db := NewTestDB()
+	defer db.Close()
+	var dest = tempfile()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
+		return nil
+	})
 
-		assert.NoError(t, db.View(func(tx *Tx) error { return tx.CopyFile(dest, 0600) }))
+	assert.NoError(t, db.View(func(tx *Tx) error { return tx.CopyFile(dest, 0600) }))
 
-		db2, err := Open(dest, 0600, nil)
-		assert.NoError(t, err)
-		defer db2.Close()
+	db2, err := Open(dest, 0600, nil)
+	assert.NoError(t, err)
+	defer db2.Close()
 
-		db2.View(func(tx *Tx) error {
-			assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
-			assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
-			return nil
-		})
+	db2.View(func(tx *Tx) error {
+		assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+		assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
+		return nil
 	})
 }
 
@@ -336,32 +338,32 @@ func (f *failWriter) Write(p []byte) (n int, err error) {
 
 // Ensure that Copy handles write errors right.
 func TestTx_CopyFile_Error_Meta(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
-			return nil
-		})
-
-		err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{}) })
-		assert.EqualError(t, err, "meta copy: error injected for tests")
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
+		return nil
 	})
+
+	err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{}) })
+	assert.EqualError(t, err, "meta copy: error injected for tests")
 }
 
 // Ensure that Copy handles write errors right.
 func TestTx_CopyFile_Error_Normal(t *testing.T) {
-	withOpenDB(func(db *DB, path string) {
-		db.Update(func(tx *Tx) error {
-			tx.CreateBucket([]byte("widgets"))
-			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
-			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
-			return nil
-		})
-
-		err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{3 * db.pageSize}) })
-		assert.EqualError(t, err, "error injected for tests")
+	db := NewTestDB()
+	defer db.Close()
+	db.Update(func(tx *Tx) error {
+		tx.CreateBucket([]byte("widgets"))
+		tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+		tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
+		return nil
 	})
+
+	err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{3 * db.pageSize}) })
+	assert.EqualError(t, err, "error injected for tests")
 }
 
 func ExampleTx_Rollback() {