Merge pull request #292 from benbjohnson/fix-size

Fix mmap resize calculation.
Ben Johnson 2015-01-28 16:58:01 -05:00
commit 681a5db8f6
2 changed files with 79 additions and 18 deletions

db.go (49 changed lines)

@@ -12,9 +12,6 @@ import (
 	"unsafe"
 )
 
-// The smallest size that the mmap can be.
-const minMmapSize = 1 << 22 // 4MB
-
 // The largest step that can be taken when remapping the mmap.
 const maxMmapStep = 1 << 30 // 1GB
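
A side note, not part of the diff: the 4MB floor that minMmapSize provided is superseded by the rewritten mmapSize further down, whose doubling loop starts at 1<<20, so the effective floor becomes 1MB. A tiny reference sketch, with both values restated here as assumptions rather than read from the patched file:

package main

import "fmt"

func main() {
	const oldMinMmapSize = 1 << 22 // the constant removed above: 4MB
	const newFloor = 1 << 20       // smallest value the new doubling loop returns: 1MB
	fmt.Println(oldMinMmapSize, newFloor) // 4194304 1048576
}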
@@ -174,11 +171,9 @@ func (db *DB) mmap(minsz int) error {
 	if size < minsz {
 		size = minsz
 	}
-	size = db.mmapSize(size)
-
-	// Verify the map size is not above the maximum allowed.
-	if size > maxMapSize {
-		return fmt.Errorf("mmap too large")
+	size, err = db.mmapSize(size)
+	if err != nil {
+		return err
 	}
 
 	// Dereference all mmap references before unmapping.
@@ -221,21 +216,39 @@ func (db *DB) munmap() error {
 // mmapSize determines the appropriate size for the mmap given the current size
 // of the database. The minimum size is 4MB and doubles until it reaches 1GB.
-func (db *DB) mmapSize(size int) int {
-	if size <= minMmapSize {
-		return minMmapSize
-	} else if size < maxMmapStep {
-		size *= 2
-	} else {
-		size += maxMmapStep
+// Returns an error if the new mmap size is greater than the max allowed.
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 1MB until 1GB.
+	for i := uint(20); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
 	}
 
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size) + int64(maxMmapStep)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz -= remainder
+	}
+
 	// Ensure that the mmap size is a multiple of the page size.
-	if (size % db.pageSize) != 0 {
-		size = ((size / db.pageSize) + 1) * db.pageSize
+	// This should always be true since we're incrementing in MBs.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
 	}
 
-	return size
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
 }
 
 // init creates a new database file and initializes its meta pages.
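
To make the new calculation easier to follow, here is a minimal standalone sketch that mirrors the rewritten mmapSize above. It is not part of the diff: maxMmapStep is copied from db.go, while maxMapSize and pageSize are assumed stand-ins for the platform-specific values (a 64-bit map limit and a 4KB page).

package main

import (
	"errors"
	"fmt"
)

const (
	maxMmapStep = 1 << 30        // 1GB, as in db.go
	maxMapSize  = 0xFFFFFFFFFFFF // assumed 64-bit limit
	pageSize    = 4096           // assumed OS page size
)

func mmapSize(size int) (int, error) {
	// Sizes up to 1GB map to the nearest power of two at or above them, floor 1MB.
	for i := uint(20); i <= 30; i++ {
		if size <= 1<<i {
			return 1 << i, nil
		}
	}

	// Refuse anything above the maximum allowed.
	if size > maxMapSize {
		return 0, errors.New("mmap too large")
	}

	// Above 1GB, grow in whole 1GB steps...
	sz := int64(size) + int64(maxMmapStep)
	if remainder := sz % int64(maxMmapStep); remainder > 0 {
		sz -= remainder
	}

	// ...keep the result page-aligned...
	if sz%pageSize != 0 {
		sz = ((sz / pageSize) + 1) * pageSize
	}

	// ...and clamp to the maximum map size.
	if sz > maxMapSize {
		sz = maxMapSize
	}
	return int(sz), nil
}

func main() {
	for _, sz := range []int{0, 16 * 1024, 3 << 20, 900 << 20, 1<<30 + 1, 5 << 30} {
		n, err := mmapSize(sz)
		fmt.Println(sz, "=>", n, err)
	}
}

Run as-is it prints 1MB for anything up to 1MB, 4MB for a 3MB database, 1GB for 900MB, and 2GB for a size just over 1GB: doubling below 1GB, whole 1GB steps above it, and an error only past maxMapSize.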

db_test.go

@@ -85,6 +85,46 @@ func TestOpen_Wait(t *testing.T) {
 	assert(t, time.Since(start) > 100*time.Millisecond, "")
 }
 
+// Ensure that opening a database does not increase its size.
+// https://github.com/boltdb/bolt/issues/291
+func TestOpen_Size(t *testing.T) {
+	// Open a data file.
+	db := NewTestDB()
+	path := db.Path()
+	defer db.Close()
+
+	// Insert until we get above the minimum 4MB size.
+	ok(t, db.Update(func(tx *bolt.Tx) error {
+		b, _ := tx.CreateBucketIfNotExists([]byte("data"))
+		for i := 0; i < 10000; i++ {
+			ok(t, b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)))
+		}
+		return nil
+	}))
+
+	// Close database and grab the size.
+	db.DB.Close()
+	sz := fileSize(path)
+	if sz == 0 {
+		t.Fatalf("unexpected new file size: %d", sz)
+	}
+
+	// Reopen database, update, and check size again.
+	db0, err := bolt.Open(path, 0666, nil)
+	ok(t, err)
+	ok(t, db0.Update(func(tx *bolt.Tx) error { return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) }))
+	ok(t, db0.Close())
+	newSz := fileSize(path)
+	if newSz == 0 {
+		t.Fatalf("unexpected new file size: %d", newSz)
+	}
+
+	// Compare the original size with the new size.
+	if sz != newSz {
+		t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
+	}
+}
+
 // Ensure that a re-opened database is consistent.
 func TestOpen_Check(t *testing.T) {
 	path := tempfile()
@@ -648,3 +688,11 @@ func trunc(b []byte, length int) []byte {
 func truncDuration(d time.Duration) string {
 	return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
 }
+
+func fileSize(path string) int64 {
+	fi, err := os.Stat(path)
+	if err != nil {
+		return 0
+	}
+	return fi.Size()
+}
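
TestOpen_Size leans on the repo's test harness (NewTestDB, ok). For readers who want to poke at the same invariant outside the harness, here is a hedged standalone sketch; the path, bucket name, and keys are placeholders, and the import path is assumed to be github.com/boltdb/bolt:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/boltdb/bolt"
)

// fileSize mirrors the helper added to db_test.go above.
func fileSize(path string) int64 {
	fi, err := os.Stat(path)
	if err != nil {
		return 0
	}
	return fi.Size()
}

func main() {
	path := "/tmp/bolt-291.db" // placeholder path
	defer os.Remove(path)

	// Create the database and insert enough data to grow past a few MB,
	// mirroring what TestOpen_Size does.
	db, err := bolt.Open(path, 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("data"))
		if err != nil {
			return err
		}
		for i := 0; i < 10000; i++ {
			if err := b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
	db.Close()
	before := fileSize(path)

	// Reopen, write one more key, and compare sizes.
	db, err = bolt.Open(path, 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	if err := db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0})
	}); err != nil {
		log.Fatal(err)
	}
	db.Close()
	after := fileSize(path)

	fmt.Printf("size before reopen: %d, after: %d\n", before, after)
}

With the corrected resize calculation both numbers should match, which is what the test above asserts.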