mirror of https://github.com/etcd-io/bbolt.git
Merge pull request #201 from jrick/checkptr
Fix unsafe pointer conversions caught by Go 1.14 checkptr (pull/210/head)
commit 2fc6815ca0
bolt_386.go

@@ -5,6 +5,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
bolt_amd64.go

@@ -5,6 +5,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
bolt_arm.go

@@ -7,22 +7,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned bool
-
-func init() {
-	// Simple check to see whether this arch handles unaligned load/stores
-	// correctly.
-
-	// ARM9 and older devices require load/stores to be from/to aligned
-	// addresses. If not, the lower 2 bits are cleared and that address is
-	// read in a jumbled up order.
-
-	// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
-
-	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
-	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
-
-	brokenUnaligned = val != 0x11222211
-}
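Note: the init probe deleted above is exactly the pattern that Go 1.14's
checkptr instrumentation (enabled by -race) rejects. A minimal standalone
sketch, not part of this diff, that reproduces the failure:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
	// &raw is normally word-aligned, so &raw + 2 is not a valid *uint32
	// address; with checkptr active this line typically dies with
	// "fatal error: checkptr: misaligned pointer conversion".
	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
	fmt.Printf("%#x\n", val)
}

Run it with "go run -race ." on Go 1.14 or newer to see the check fire.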
bolt_arm64.go

@@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
bolt_mips64x.go

@@ -7,6 +7,3 @@ const maxMapSize = 0x8000000000 // 512GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
bolt_mipsx.go

@@ -7,6 +7,3 @@ const maxMapSize = 0x40000000 // 1GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
bolt_ppc.go

@@ -7,6 +7,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
bolt_ppc64.go

@@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
bolt_ppc64le.go

@@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
bolt_riscv64.go

@@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = true
bolt_s390x.go

@@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
bucket.go

@@ -123,10 +123,12 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
 func (b *Bucket) openBucket(value []byte) *Bucket {
 	var child = newBucket(b.tx)
 
-	// If unaligned load/stores are broken on this arch and value is
-	// unaligned simply clone to an aligned byte array.
-	unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
-
+	// Unaligned access requires a copy to be made.
+	const unalignedMask = unsafe.Alignof(struct {
+		bucket
+		page
+	}{}) - 1
+	unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0
 	if unaligned {
 		value = cloneBytes(value)
 	}
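Note: unsafe.Alignof(struct{ bucket; page }{}) - 1 builds a low-bit mask from
the stricter of the two headers' alignments (a mask of 7 for 8-byte
alignment), replacing the old hard-coded &3 test that only ran on
brokenUnaligned architectures. A standalone sketch of the same check, using a
hypothetical header type in place of bbolt's bucket and page:

package main

import (
	"fmt"
	"unsafe"
)

type header struct {
	root     uint64 // stands in for bbolt's bucket fields
	id       uint64 // stands in for bbolt's page fields
	flags    uint16
	count    uint16
	overflow uint32
}

func main() {
	const mask = unsafe.Alignof(header{}) - 1 // 7 on 64-bit platforms
	buf := make([]byte, 16)
	for off := 0; off < 4; off++ {
		addr := uintptr(unsafe.Pointer(&buf[off]))
		// An address is suitably aligned iff its low bits are zero.
		fmt.Printf("offset %d aligned: %v\n", off, addr&mask == 0)
	}
}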
@@ -409,7 +411,7 @@ func (b *Bucket) Stats() BucketStats {
 
 		if p.count != 0 {
 			// If page has any elements, add all element headers.
-			used += leafPageElementSize * int(p.count-1)
+			used += leafPageElementSize * uintptr(p.count-1)
 
 			// Add all element key, value sizes.
 			// The computation takes advantage of the fact that the position
@@ -417,16 +419,16 @@ func (b *Bucket) Stats() BucketStats {
 			// of all previous elements' keys and values.
 			// It also includes the last element's header.
 			lastElement := p.leafPageElement(p.count - 1)
-			used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
+			used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize)
 		}
 
 		if b.root == 0 {
 			// For inlined bucket just update the inline stats
-			s.InlineBucketInuse += used
+			s.InlineBucketInuse += int(used)
 		} else {
 			// For non-inlined bucket update all the leaf stats
 			s.LeafPageN++
-			s.LeafInuse += used
+			s.LeafInuse += int(used)
 			s.LeafOverflowN += int(p.overflow)
 
 			// Collect stats from sub-buckets.
@@ -447,13 +449,13 @@ func (b *Bucket) Stats() BucketStats {
 
 			// used totals the used bytes for the page
 			// Add header and all element headers.
-			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
+			used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1))
 
 			// Add size of all keys and values.
 			// Again, use the fact that last element's position equals to
 			// the total of key, value sizes of all previous elements.
-			used += int(lastElement.pos + lastElement.ksize)
-			s.BranchInuse += used
+			used += uintptr(lastElement.pos + lastElement.ksize)
+			s.BranchInuse += int(used)
 			s.BranchOverflowN += int(p.overflow)
 		}
 
@@ -593,7 +595,7 @@ func (b *Bucket) inlineable() bool {
 	// our threshold for inline bucket size.
 	var size = pageHeaderSize
 	for _, inode := range n.inodes {
-		size += leafPageElementSize + len(inode.key) + len(inode.value)
+		size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value))
 
 		if inode.flags&bucketLeafFlag != 0 {
 			return false
@@ -606,8 +608,8 @@ func (b *Bucket) inlineable() bool {
 }
 
 // Returns the maximum total size of a bucket to make it a candidate for inlining.
-func (b *Bucket) maxInlineBucketSize() int {
-	return b.tx.db.pageSize / 4
+func (b *Bucket) maxInlineBucketSize() uintptr {
+	return uintptr(b.tx.db.pageSize / 4)
 }
 
 // write allocates and writes a bucket to a byte slice.
db_test.go

@@ -1358,7 +1358,7 @@ func ExampleDB_View() {
 	// John's last name is doe.
 }
 
-func ExampleDB_Begin_ReadOnly() {
+func ExampleDB_Begin() {
 	// Open the database.
 	db, err := bolt.Open(tempfile(), 0666, nil)
 	if err != nil {
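Note: the rename is presumably to satisfy go vet's "tests" analyzer, which go
test runs by default: it parses example names as ExampleType_Method_suffix
and requires any suffix to begin with a lowercase letter, so the ReadOnly
suffix is flagged as malformed. An illustrative sketch with a hypothetical
type T (not bbolt's API):

package demo

type T struct{}

func (T) Begin() {}

// In a _test.go file for the package above:
//
//	func ExampleT_Begin() {}          // ok: refers to T.Begin
//	func ExampleT_Begin_readOnly() {} // ok: lowercase suffix
//	func ExampleT_Begin_ReadOnly() {} // vet: malformed example suffix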
freelist.go

@@ -2,6 +2,7 @@ package bbolt
 
 import (
 	"fmt"
+	"reflect"
 	"sort"
 	"unsafe"
 )
@@ -71,7 +72,7 @@ func (f *freelist) size() int {
 		// The first element will be used to store the count. See freelist.write.
 		n++
 	}
-	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
+	return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n)
 }
 
 // count returns count of pages on the freelist
@@ -93,8 +94,24 @@ func (f *freelist) pending_count() int {
 	return count
 }
 
-// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// copyallunsafe copies a list of all free ids and all pending ids in one sorted list.
 // f.count returns the minimum length required for dst.
+func (f *freelist) copyallunsafe(dstptr unsafe.Pointer) { // dstptr is []pgid data pointer
+	m := make(pgids, 0, f.pending_count())
+	for _, txp := range f.pending {
+		m = append(m, txp.ids...)
+	}
+	sort.Sort(m)
+	fpgids := f.getFreePageIDs()
+	sz := len(fpgids) + len(m)
+	dst := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
+		Data: uintptr(dstptr),
+		Len:  sz,
+		Cap:  sz,
+	}))
+	mergepgids(dst, fpgids, m)
+}
+
 func (f *freelist) copyall(dst []pgid) {
 	m := make(pgids, 0, f.pending_count())
 	for _, txp := range f.pending {
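Note: the reflect.SliceHeader construction is how this commit views raw page
memory as a typed slice; it replaces the old trick of casting to a
*[maxAllocSize]T array and reslicing, which checkptr rejects when the real
buffer is smaller than the artificial array bound. A standalone sketch of the
pattern:

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

func main() {
	backing := [4]uint64{10, 20, 30, 40}

	// Fill in a slice header by hand, then reinterpret it as []uint64.
	view := *(*[]uint64)(unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&backing)),
		Len:  4,
		Cap:  4,
	}))
	fmt.Println(view) // [10 20 30 40]

	// The header's Data field is a bare uintptr, so keep the backing
	// memory reachable until the view is no longer used.
	runtime.KeepAlive(&backing)
}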
@@ -267,17 +284,21 @@ func (f *freelist) read(p *page) {
 	}
 	// If the page.count is at the max uint16 value (64k) then it's considered
 	// an overflow and the size of the freelist is stored as the first element.
-	idx, count := 0, int(p.count)
+	var idx, count uintptr = 0, uintptr(p.count)
 	if count == 0xFFFF {
 		idx = 1
-		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
+		count = uintptr(*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))))
 	}
 
 	// Copy the list of page ids from the freelist.
 	if count == 0 {
 		f.ids = nil
 	} else {
-		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
+		ids := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
+			Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + idx*unsafe.Sizeof(pgid(0)),
+			Len:  int(count),
+			Cap:  int(count),
+		}))
 
 		// copy the ids, so we don't modify on the freelist page directly
 		idsCopy := make([]pgid, count)
@@ -315,11 +336,11 @@ func (f *freelist) write(p *page) error {
 		p.count = uint16(lenids)
 	} else if lenids < 0xFFFF {
 		p.count = uint16(lenids)
-		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
+		f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
 	} else {
 		p.count = 0xFFFF
-		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
-		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
+		*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) = pgid(lenids)
+		f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + unsafe.Sizeof(pgid(0))))
 	}
 
 	return nil
freelist_test.go

@@ -262,7 +262,7 @@ func TestFreelist_read(t *testing.T) {
 	page.count = 2
 
 	// Insert 2 page ids.
-	ids := (*[3]pgid)(unsafe.Pointer(&page.ptr))
+	ids := (*[3]pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(page)) + unsafe.Sizeof(*page)))
 	ids[0] = 23
 	ids[1] = 50
 
go.mod

@@ -1,3 +1,5 @@
 module go.etcd.io/bbolt
 
 go 1.12
+
+require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
go.sum

@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
node.go

@@ -3,6 +3,7 @@ package bbolt
 import (
 	"bytes"
 	"fmt"
+	"reflect"
 	"sort"
 	"unsafe"
 )
@@ -41,19 +42,19 @@ func (n *node) size() int {
 	sz, elsz := pageHeaderSize, n.pageElementSize()
 	for i := 0; i < len(n.inodes); i++ {
 		item := &n.inodes[i]
-		sz += elsz + len(item.key) + len(item.value)
+		sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
 	}
-	return sz
+	return int(sz)
 }
 
 // sizeLessThan returns true if the node is less than a given size.
 // This is an optimization to avoid calculating a large node when we only need
 // to know if it fits inside a certain page size.
-func (n *node) sizeLessThan(v int) bool {
+func (n *node) sizeLessThan(v uintptr) bool {
 	sz, elsz := pageHeaderSize, n.pageElementSize()
 	for i := 0; i < len(n.inodes); i++ {
 		item := &n.inodes[i]
-		sz += elsz + len(item.key) + len(item.value)
+		sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
 		if sz >= v {
 			return false
 		}
@@ -62,7 +63,7 @@ func (n *node) sizeLessThan(v int) bool {
 }
 
 // pageElementSize returns the size of each page element based on the type of node.
-func (n *node) pageElementSize() int {
+func (n *node) pageElementSize() uintptr {
 	if n.isLeaf {
 		return leafPageElementSize
 	}
@@ -207,39 +208,39 @@ func (n *node) write(p *page) {
 	}
 
 	// Loop over each item and write it to the page.
-	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
+	bp := uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
 	for i, item := range n.inodes {
 		_assert(len(item.key) > 0, "write: zero-length inode key")
 
 		// Write the page element.
 		if n.isLeaf {
 			elem := p.leafPageElement(uint16(i))
-			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+			elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
 			elem.flags = item.flags
 			elem.ksize = uint32(len(item.key))
 			elem.vsize = uint32(len(item.value))
 		} else {
 			elem := p.branchPageElement(uint16(i))
-			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+			elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
 			elem.ksize = uint32(len(item.key))
 			elem.pgid = item.pgid
 			_assert(elem.pgid != p.id, "write: circular dependency occurred")
 		}
 
-		// If the length of key+value is larger than the max allocation size
-		// then we need to reallocate the byte array pointer.
-		//
-		// See: https://github.com/boltdb/bolt/pull/335
+		// Create a slice to write into of needed size and advance
+		// byte pointer for next iteration.
 		klen, vlen := len(item.key), len(item.value)
-		if len(b) < klen+vlen {
-			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
-		}
+		sz := klen + vlen
+		b := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+			Data: bp,
+			Len:  sz,
+			Cap:  sz,
+		}))
+		bp += uintptr(sz)
 
 		// Write data for the element to the end of the page.
-		copy(b[0:], item.key)
-		b = b[klen:]
-		copy(b[0:], item.value)
-		b = b[vlen:]
+		l := copy(b, item.key)
+		copy(b[l:], item.value)
 	}
 
 	// DEBUG ONLY: n.dump()
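Note: the rewrite repeats two conversions: advancing a base pointer by a byte
offset, and viewing the memory at that offset as a []byte. A standalone
sketch with hypothetical helper names (bbolt itself inlines these expressions
in this commit):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// add returns p advanced by off bytes. Keeping the uintptr arithmetic and
// the conversion back to unsafe.Pointer in one expression follows the
// unsafe.Pointer rules that checkptr enforces.
func add(p unsafe.Pointer, off uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + off)
}

// byteSlice views sz bytes starting at p as a []byte.
func byteSlice(p unsafe.Pointer, sz int) []byte {
	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(p),
		Len:  sz,
		Cap:  sz,
	}))
}

func main() {
	var page [16]byte
	b := byteSlice(add(unsafe.Pointer(&page), 4), 8)
	l := copy(b, "key")
	copy(b[l:], "value")
	fmt.Printf("%q\n", page[:]) // "\x00\x00\x00\x00keyvalue\x00\x00\x00\x00"
}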
@@ -247,7 +248,7 @@ func (n *node) write(p *page) {
 
 // split breaks up a node into multiple smaller nodes, if appropriate.
 // This should only be called from the spill() function.
-func (n *node) split(pageSize int) []*node {
+func (n *node) split(pageSize uintptr) []*node {
 	var nodes []*node
 
 	node := n
@@ -270,7 +271,7 @@ func (n *node) split(pageSize int) []*node {
 
 // splitTwo breaks up a node into two smaller nodes, if appropriate.
 // This should only be called from the split() function.
-func (n *node) splitTwo(pageSize int) (*node, *node) {
+func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
 	// Ignore the split if the page doesn't have at least enough nodes for
 	// two pages or if the nodes can fit in a single page.
 	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
@@ -312,18 +313,18 @@ func (n *node) splitTwo(pageSize int) (*node, *node) {
 // splitIndex finds the position where a page will fill a given threshold.
 // It returns the index as well as the size of the first page.
 // This is only be called from split().
-func (n *node) splitIndex(threshold int) (index, sz int) {
+func (n *node) splitIndex(threshold int) (index, sz uintptr) {
 	sz = pageHeaderSize
 
 	// Loop until we only have the minimum number of keys required for the second page.
 	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
-		index = i
+		index = uintptr(i)
 		inode := n.inodes[i]
-		elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
+		elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value))
 
 		// If we have at least the minimum number of keys and adding another
 		// node would put us over the threshold then exit and return.
-		if i >= minKeysPerPage && sz+elsize > threshold {
+		if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
 			break
 		}
 
@@ -356,7 +357,7 @@ func (n *node) spill() error {
 	n.children = nil
 
 	// Split nodes into appropriate sizes. The first node will always be n.
-	var nodes = n.split(tx.db.pageSize)
+	var nodes = n.split(uintptr(tx.db.pageSize))
 	for _, node := range nodes {
 		// Add node's page to the freelist if it's not new.
 		if node.pgid > 0 {
@@ -587,9 +588,11 @@ func (n *node) dump() {
 
 type nodes []*node
 
-func (s nodes) Len() int           { return len(s) }
-func (s nodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
+func (s nodes) Len() int      { return len(s) }
+func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nodes) Less(i, j int) bool {
+	return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
+}
 
 // inode represents an internal node inside of a node.
 // It can be used to point to elements in a page or point
node_test.go

@@ -39,7 +39,7 @@ func TestNode_read_LeafPage(t *testing.T) {
 	page.count = 2
 
 	// Insert 2 elements at the beginning. sizeof(leafPageElement) == 16
-	nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr))
+	nodes := (*[3]leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(page)) + unsafe.Sizeof(*page)))
 	nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4}  // pos = sizeof(leafPageElement) * 2
 	nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4
 
page.go

@@ -3,16 +3,17 @@ package bbolt
 import (
 	"fmt"
 	"os"
+	"reflect"
 	"sort"
 	"unsafe"
 )
 
-const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
+const pageHeaderSize = unsafe.Sizeof(page{})
 
 const minKeysPerPage = 2
 
-const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
-const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
+const branchPageElementSize = unsafe.Sizeof(branchPageElement{})
+const leafPageElementSize = unsafe.Sizeof(leafPageElement{})
 
 const (
 	branchPageFlag = 0x01
@@ -32,7 +33,6 @@ type page struct {
 	flags    uint16
 	count    uint16
 	overflow uint32
-	ptr      uintptr
 }
 
 // typ returns a human readable page type string used for debugging.
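Note: dropping the placeholder ptr field is what lets pageHeaderSize switch
from unsafe.Offsetof(((*page)(nil)).ptr) to unsafe.Sizeof(page{}): both come
out to the same 16 bytes, since ptr only marked the first byte past the real
header fields. A standalone sketch (pgid assumed to be uint64, as in bbolt):

package main

import (
	"fmt"
	"unsafe"
)

type pgid uint64

type oldPage struct {
	id       pgid
	flags    uint16
	count    uint16
	overflow uint32
	ptr      uintptr // placeholder marking the start of page data
}

type newPage struct {
	id       pgid
	flags    uint16
	count    uint16
	overflow uint32
}

func main() {
	fmt.Println(unsafe.Offsetof(oldPage{}.ptr)) // 16
	fmt.Println(unsafe.Sizeof(newPage{}))       // 16
}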
@@ -51,13 +51,13 @@ func (p *page) typ() string {
 
 // meta returns a pointer to the metadata section of the page.
 func (p *page) meta() *meta {
-	return (*meta)(unsafe.Pointer(&p.ptr))
+	return (*meta)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
 }
 
 // leafPageElement retrieves the leaf node by index
 func (p *page) leafPageElement(index uint16) *leafPageElement {
-	n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
-	return n
+	off := uintptr(index) * unsafe.Sizeof(leafPageElement{})
+	return (*leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
 }
 
 // leafPageElements retrieves a list of leaf nodes.
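Note: each accessor now does its pointer math as uintptr arithmetic folded
into a single expression, the form the unsafe.Pointer rules require (and the
form checkptr can verify), instead of indexing through an artificial
[0x7FFFFFF]T array. A standalone sketch of the pattern:

package main

import (
	"fmt"
	"unsafe"
)

type elem struct{ k, v uint32 }

func main() {
	buf := [3]elem{{1, 2}, {3, 4}, {5, 6}}
	p := unsafe.Pointer(&buf)

	// Equivalent of leafPageElement(i): base + i*sizeof(elem), with the
	// uintptr conversion and the conversion back in one expression.
	i := uintptr(2)
	e := (*elem)(unsafe.Pointer(uintptr(p) + i*unsafe.Sizeof(elem{})))
	fmt.Println(e.k, e.v) // 5 6
}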
@@ -65,12 +65,17 @@ func (p *page) leafPageElements() []leafPageElement {
 	if p.count == 0 {
 		return nil
 	}
-	return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
+	return *(*[]leafPageElement)(unsafe.Pointer(&reflect.SliceHeader{
+		Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
+		Len:  int(p.count),
+		Cap:  int(p.count),
+	}))
 }
 
 // branchPageElement retrieves the branch node by index
 func (p *page) branchPageElement(index uint16) *branchPageElement {
-	return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
+	off := uintptr(index) * unsafe.Sizeof(branchPageElement{})
+	return (*branchPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
 }
 
 // branchPageElements retrieves a list of branch nodes.
@@ -78,12 +83,20 @@ func (p *page) branchPageElements() []branchPageElement {
 	if p.count == 0 {
 		return nil
 	}
-	return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
+	return *(*[]branchPageElement)(unsafe.Pointer(&reflect.SliceHeader{
+		Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
+		Len:  int(p.count),
+		Cap:  int(p.count),
+	}))
 }
 
 // dump writes n bytes of the page to STDERR as hex output.
 func (p *page) hexdump(n int) {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
+	buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+		Data: uintptr(unsafe.Pointer(p)),
+		Len:  n,
+		Cap:  n,
+	}))
 	fmt.Fprintf(os.Stderr, "%x\n", buf)
 }
 
@@ -102,8 +115,11 @@ type branchPageElement struct {
 
 // key returns a byte slice of the node key.
 func (n *branchPageElement) key() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
+	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+		Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
+		Len:  int(n.ksize),
+		Cap:  int(n.ksize),
+	}))
 }
 
 // leafPageElement represents a node on a leaf page.
@@ -116,14 +132,20 @@ type leafPageElement struct {
 
 // key returns a byte slice of the node key.
 func (n *leafPageElement) key() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
+	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+		Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
+		Len:  int(n.ksize),
+		Cap:  int(n.ksize),
+	}))
 }
 
 // value returns a byte slice of the node value.
 func (n *leafPageElement) value() []byte {
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
+	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+		Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos) + uintptr(n.ksize),
+		Len:  int(n.vsize),
+		Cap:  int(n.vsize),
+	}))
 }
 
 // PageInfo represents human readable information about a page.
tx.go

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"reflect"
 	"sort"
 	"strings"
 	"time"
@@ -527,7 +528,7 @@ func (tx *Tx) write() error {
 		offset := int64(p.id) * int64(tx.db.pageSize)
 
 		// Write out page in "max allocation" sized chunks.
-		ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
+		ptr := uintptr(unsafe.Pointer(p))
 		for {
 			// Limit our write to our max allocation size.
 			sz := size
@@ -536,7 +537,11 @@ func (tx *Tx) write() error {
 			}
 
 			// Write chunk to disk.
-			buf := ptr[:sz]
+			buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+				Data: ptr,
+				Len:  sz,
+				Cap:  sz,
+			}))
 			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
 				return err
 			}
@@ -552,7 +557,7 @@ func (tx *Tx) write() error {
 
 			// Otherwise move offset forward and move pointer to next chunk.
 			offset += int64(sz)
-			ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
+			ptr += uintptr(sz)
 		}
 	}
 
@@ -571,7 +576,11 @@ func (tx *Tx) write() error {
 			continue
 		}
 
-		buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
+		buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+			Data: uintptr(unsafe.Pointer(p)),
+			Len:  tx.db.pageSize,
+			Cap:  tx.db.pageSize,
+		}))
 
 		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
 		for i := range buf {
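Note: with these conversions in place, the change can be exercised end to end
with the race-enabled test run, since -race also turns on checkptr
instrumentation on Go 1.14 and newer:

	go test -race ./...

Before this commit, that command should report the checkptr fatal errors this
pull request fixes; after it, the remaining unsafe conversions pass the
checks.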