mirror of https://github.com/etcd-io/bbolt.git
Change fill percent to be per-bucket.
This commit moves the DB.FillPercent field to Bucket.FillPercent. This allows the fill percentage to be specified per-bucket, per-tx. This value is not persisted and should be set whenever using it.

pull/34/head
parent 5fb781318f
commit c3400efefd
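Because the value now lives on Bucket and is not persisted, callers that want a non-default fill percentage set it on the bucket handle inside each write transaction. A minimal usage sketch, assuming the current bbolt import path; the file name, bucket name, and key/value here are illustrative, not taken from this commit:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("data.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		// The bucket name "events" is illustrative.
		b, err := tx.CreateBucketIfNotExists([]byte("events"))
		if err != nil {
			return err
		}

		// FillPercent is per-bucket and per-transaction; it is not persisted,
		// so it must be set again in every transaction that relies on it.
		// A higher value packs split pages more tightly, which suits
		// mostly append-only workloads.
		b.FillPercent = 0.9

		return b.Put([]byte("key"), []byte("value"))
	})
	if err != nil {
		log.Fatal(err)
	}
}

Setting 0.9 here mirrors what the updated test and benchmark code in this commit does for append-heavy writes.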
bucket.go (22 changed lines)

@@ -23,6 +23,15 @@ const (
 
 const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
 
+const (
+	minFillPercent = 0.1
+	maxFillPercent = 1.0
+)
+
+// DefaultFillPercent is the percentage that split pages are filled.
+// This value can be changed by setting Bucket.FillPercent.
+const DefaultFillPercent = 0.5
+
 // Bucket represents a collection of key/value pairs inside the database.
 type Bucket struct {
 	*bucket
@@ -31,6 +40,11 @@ type Bucket struct {
 	page     *page          // inline page reference
 	rootNode *node          // materialized node for the root page.
 	nodes    map[pgid]*node // node cache
+
+	// Sets the threshold for filling nodes when they split. By default,
+	// the bucket will fill to 50% but it can be useful to increase this
+	// amount if you know that your write workloads are mostly append-only.
+	FillPercent float64
 }
 
 // bucket represents the on-file representation of a bucket.
@@ -44,7 +58,7 @@ type bucket struct {
 
 // newBucket returns a new bucket associated with a transaction.
 func newBucket(tx *Tx) Bucket {
-	var b = Bucket{tx: tx}
+	var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
 	if tx.writable {
 		b.buckets = make(map[string]*Bucket)
 		b.nodes = make(map[pgid]*node)
@@ -155,7 +169,11 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
 	}
 
 	// Create empty, inline bucket.
-	var bucket = Bucket{bucket: &bucket{}, rootNode: &node{isLeaf: true}}
+	var bucket = Bucket{
+		bucket:      &bucket{},
+		rootNode:    &node{isLeaf: true},
+		FillPercent: DefaultFillPercent,
+	}
 	var value = bucket.write()
 
 	// Insert into node.

@@ -674,8 +674,6 @@ func TestBucket_Stats_RandomFill(t *testing.T) {
 	}
 
 	withOpenDB(func(db *DB, path string) {
-		db.FillPercent = 0.9
-
 		// Add a set of values in random order. It will be the same random
 		// order so we can maintain consistency between test runs.
 		var count int
@@ -683,6 +681,7 @@ func TestBucket_Stats_RandomFill(t *testing.T) {
 		for _, i := range r.Perm(1000) {
 			db.Update(func(tx *Tx) error {
 				b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
+				b.FillPercent = 0.9
 				for _, j := range r.Perm(100) {
 					index := (j * 10000) + i
 					b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000"))

@@ -47,7 +47,6 @@ func Bench(options *BenchOptions) {
 		return
 	}
 	db.NoSync = options.NoSync
-	db.FillPercent = options.FillPercent
 	defer db.Close()
 
 	// Enable streaming stats.
@@ -140,6 +139,7 @@ func benchWriteWithSource(db *bolt.DB, options *BenchOptions, results *BenchResu
 	for i := 0; i < options.Iterations; i += options.BatchSize {
 		err := db.Update(func(tx *bolt.Tx) error {
 			b, _ := tx.CreateBucketIfNotExists(benchBucketName)
+			b.FillPercent = options.FillPercent
 
 			for j := 0; j < options.BatchSize; j++ {
 				var key = make([]byte, options.KeySize)
@@ -165,10 +165,12 @@ func benchWriteNestedWithSource(db *bolt.DB, options *BenchOptions, results *Ben
 	for i := 0; i < options.Iterations; i += options.BatchSize {
 		err := db.Update(func(tx *bolt.Tx) error {
 			top, _ := tx.CreateBucketIfNotExists(benchBucketName)
+			top.FillPercent = options.FillPercent
 
 			var name = make([]byte, options.KeySize)
 			binary.BigEndian.PutUint32(name, keySource())
 			b, _ := top.CreateBucketIfNotExists(name)
+			b.FillPercent = options.FillPercent
 
 			for j := 0; j < options.BatchSize; j++ {
 				var key = make([]byte, options.KeySize)

db.go (16 changed lines)

@@ -23,15 +23,6 @@ const version = 2
 // Represents a marker value to indicate that a file is a Bolt DB.
 const magic uint32 = 0xED0CDAED
 
-const (
-	minFillPercent = 0.1
-	maxFillPercent = 1.0
-)
-
-// DefaultFillPercent is the percentage that split pages are filled.
-// This value can be changed by setting DB.FillPercent.
-const DefaultFillPercent = 0.5
-
 // DB represents a collection of buckets persisted to a file on disk.
 // All data access is performed through transactions which can be obtained through the DB.
 // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
@@ -42,11 +33,6 @@ type DB struct {
 	// debugging purposes.
 	StrictMode bool
 
-	// Sets the threshold for filling nodes when they split. By default,
-	// the database will fill to 50% but it can be useful to increase this
-	// amount if you know that your write workloads are mostly append-only.
-	FillPercent float64
-
 	// Setting the NoSync flag will cause the database to skip fsync()
 	// calls after each commit. This can be useful when bulk loading data
 	// into a database and you can restart the bulk load in the event of
@@ -99,7 +85,7 @@ func (db *DB) String() string {
 // If the file does not exist then it will be created automatically.
 // Passing in nil options will cause Bolt to open the database with the default options.
 func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
-	var db = &DB{opened: true, FillPercent: DefaultFillPercent}
+	var db = &DB{opened: true}
 
 	// Set default options if no options are provided.
 	if options == nil {

node.go (2 changed lines)

@@ -255,7 +255,7 @@ func (n *node) splitTwo(pageSize int) (*node, *node) {
 	}
 
 	// Determine the threshold before starting a new node.
-	var fillPercent = n.bucket.tx.db.FillPercent
+	var fillPercent = n.bucket.FillPercent
 	if fillPercent < minFillPercent {
 		fillPercent = minFillPercent
 	} else if fillPercent > maxFillPercent {
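As the node.go hunk shows, the per-bucket value is clamped to the [minFillPercent, maxFillPercent] range before it is used; the clamped percentage is then applied to the page size to decide when a node is considered full enough to split. A small sketch of that calculation, with the constant values copied from the new const block in bucket.go; the helper name splitThreshold is illustrative and not a function in the package:

// splitThreshold sketches how a bucket's FillPercent is clamped and then
// applied to the page size: a node is split once it grows past roughly
// pageSize * fillPercent bytes.
func splitThreshold(pageSize int, fillPercent float64) int {
	const (
		minFillPercent = 0.1
		maxFillPercent = 1.0
	)
	if fillPercent < minFillPercent {
		fillPercent = minFillPercent
	} else if fillPercent > maxFillPercent {
		fillPercent = maxFillPercent
	}
	return int(float64(pageSize) * fillPercent)
}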