mirror of https://github.com/etcd-io/bbolt.git
Expand assertion statements.
This commit expands calls to _assert() that use variadic arguments. These calls require conversion to interface{}, so there was a large number of calls to Go's internal convT2E() function. In some profiling this was taking over 20% of total runtime. I don't remember seeing this before Go 1.4, so perhaps something has changed.
pull/34/head
parent 681a5db8f6
commit b4d00c394a
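For context, a rough sketch (not part of the commit) of the assertion pattern being replaced. The _assert helper itself is not shown in this diff, but it has roughly this shape:

    // Hypothetical sketch of a variadic assertion helper.
    func _assert(condition bool, msg string, v ...interface{}) {
    	if !condition {
    		panic(fmt.Sprintf("assertion failed: "+msg, v...))
    	}
    }

A call such as

    _assert(p.id > 1, "cannot free page 0 or 1: %d", p.id)

has to convert p.id to interface{} (the convT2E conversion mentioned above) and build the variadic slice on every call, even when the condition holds. The expanded form used throughout this commit only formats the message on the failure path:

    if p.id <= 1 {
    	panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
    }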
bucket.go (16 lines changed)
@@ -511,8 +511,12 @@ func (b *Bucket) spill() error {
 		// Update parent node.
 		var c = b.Cursor()
 		k, _, flags := c.seek([]byte(name))
-		_assert(bytes.Equal([]byte(name), k), "misplaced bucket header: %x -> %x", []byte(name), k)
-		_assert(flags&bucketLeafFlag != 0, "unexpected bucket header flag: %x", flags)
+		if !bytes.Equal([]byte(name), k) {
+			panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
+		}
+		if flags&bucketLeafFlag == 0 {
+			panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
+		}
 		c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
 	}
 
@@ -528,7 +532,9 @@ func (b *Bucket) spill() error {
 	b.rootNode = b.rootNode.root()
 
 	// Update the root node for this bucket.
-	_assert(b.rootNode.pgid < b.tx.meta.pgid, "pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)
+	if b.rootNode.pgid >= b.tx.meta.pgid {
+		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
+	}
 	b.root = b.rootNode.pgid
 
 	return nil
@@ -659,7 +665,9 @@ func (b *Bucket) pageNode(id pgid) (*page, *node) {
 	// Inline buckets have a fake page embedded in their value so treat them
 	// differently. We'll return the rootNode (if available) or the fake page.
 	if b.root == 0 {
-		_assert(id == 0, "inline bucket non-zero page access(2): %d != 0", id)
+		if id != 0 {
+			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
+		}
 		if b.rootNode != nil {
 			return nil, b.rootNode
 		}
cursor.go

@@ -2,6 +2,7 @@ package bolt
 
 import (
 	"bytes"
+	"fmt"
 	"sort"
 )
 
@@ -228,8 +229,8 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
 // search recursively performs a binary search against a given page/node until it finds a given key.
 func (c *Cursor) search(key []byte, pgid pgid) {
 	p, n := c.bucket.pageNode(pgid)
-	if p != nil {
-		_assert((p.flags&(branchPageFlag|leafPageFlag)) != 0, "invalid page type: %d: %x", p.id, p.flags)
+	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
+		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
 	}
 	e := elemRef{page: p, node: n}
 	c.stack = append(c.stack, e)
db.go (8 lines changed)
@@ -664,9 +664,11 @@ func (m *meta) copy(dest *meta) {
 
 // write writes the meta onto a page.
 func (m *meta) write(p *page) {
-	_assert(m.root.root < m.pgid, "root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)
-	_assert(m.freelist < m.pgid, "freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)
-
+	if m.root.root >= m.pgid {
+		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+	} else if m.freelist >= m.pgid {
+		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+	}
 	// Page id is either going to be 0 or 1 which we can determine by the transaction ID.
 	p.id = pgid(m.txid % 2)
 
freelist.go (13 lines changed)
@@ -1,6 +1,7 @@
 package bolt
 
 import (
+	"fmt"
 	"sort"
 	"unsafe"
 )
@@ -67,7 +68,9 @@ func (f *freelist) allocate(n int) pgid {
 
 	var initial, previd pgid
 	for i, id := range f.ids {
-		_assert(id > 1, "invalid page allocation: %d", id)
+		if id <= 1 {
+			panic(fmt.Sprintf("invalid page allocation: %d", id))
+		}
 
 		// Reset initial page if this is not contiguous.
 		if previd == 0 || id-previd != 1 {
@@ -103,13 +106,17 @@ func (f *freelist) allocate(n int) pgid {
 // free releases a page and its overflow for a given transaction id.
 // If the page is already free then a panic will occur.
 func (f *freelist) free(txid txid, p *page) {
-	_assert(p.id > 1, "cannot free page 0 or 1: %d", p.id)
+	if p.id <= 1 {
+		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
+	}
 
 	// Free page and all its overflow pages.
 	var ids = f.pending[txid]
 	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
 		// Verify that page is not already free.
-		_assert(!f.cache[id], "page %d already freed", id)
+		if f.cache[id] {
+			panic(fmt.Sprintf("page %d already freed", id))
+		}
 
 		// Add to the freelist and cache.
 		ids = append(ids, id)
node.go (23 lines changed)
@@ -2,6 +2,7 @@ package bolt
 
 import (
 	"bytes"
+	"fmt"
 	"sort"
 	"unsafe"
 )
@@ -70,7 +71,9 @@ func (n *node) pageElementSize() int {
 
 // childAt returns the child node at a given index.
 func (n *node) childAt(index int) *node {
-	_assert(!n.isLeaf, "invalid childAt(%d) on a leaf node", index)
+	if n.isLeaf {
+		panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
+	}
 	return n.bucket.node(n.inodes[index].pgid, n)
 }
 
@@ -111,9 +114,13 @@ func (n *node) prevSibling() *node {
 
 // put inserts a key/value.
 func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
-	_assert(pgid < n.bucket.tx.meta.pgid, "pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)
-	_assert(len(oldKey) > 0, "put: zero-length old key")
-	_assert(len(newKey) > 0, "put: zero-length new key")
+	if pgid >= n.bucket.tx.meta.pgid {
+		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
+	} else if len(oldKey) <= 0 {
+		panic("put: zero-length old key")
+	} else if len(newKey) <= 0 {
+		panic("put: zero-length new key")
+	}
 
 	// Find insertion index.
 	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
@@ -189,7 +196,9 @@ func (n *node) write(p *page) {
 		p.flags |= branchPageFlag
 	}
 
-	_assert(len(n.inodes) < 0xFFFF, "inode overflow: %d (pgid=%d)", len(n.inodes), p.id)
+	if len(n.inodes) >= 0xFFFF {
+		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
+	}
 	p.count = uint16(len(n.inodes))
 
 	// Loop over each item and write it to the page.
@@ -348,7 +357,9 @@ func (n *node) spill() error {
 		}
 
 		// Write the node.
-		_assert(p.id < tx.meta.pgid, "pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)
+		if p.id >= tx.meta.pgid {
+			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
+		}
 		node.pgid = p.id
 		node.write(p)
 		node.spilled = true
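Not part of the commit, but one rough way to see the overhead described in the commit message is a hypothetical micro-benchmark that compares a variadic assert helper against an inline check. The names (variadicAssert, pageID, the bolt_test package) are illustrative, not from the repository; on older Go toolchains the variadic version shows the interface{} conversions in its profile even though the condition always holds.

    package bolt_test

    import (
    	"fmt"
    	"testing"
    )

    // Package-level value so the compiler is less likely to fold the checks away.
    var pageID int64 = 42

    // variadicAssert mirrors the shape of an _assert-style helper: its arguments
    // are converted to interface{} on every call, even when the condition holds.
    func variadicAssert(condition bool, msg string, v ...interface{}) {
    	if !condition {
    		panic(fmt.Sprintf("assertion failed: "+msg, v...))
    	}
    }

    func BenchmarkVariadicAssert(b *testing.B) {
    	for i := 0; i < b.N; i++ {
    		variadicAssert(pageID > 1, "invalid page allocation: %d", pageID)
    	}
    }

    func BenchmarkInlineCheck(b *testing.B) {
    	for i := 0; i < b.N; i++ {
    		// Formatting arguments are only touched on the failure path.
    		if pageID <= 1 {
    			panic(fmt.Sprintf("invalid page allocation: %d", pageID))
    		}
    	}
    }

Run with: go test -bench=. -benchmem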