Merge branch 'master' into patch

commit 35345c50ae (pull/223/head)
Xiang Li, 2021-04-23 17:35:38 -07:00, committed by GitHub
17 changed files with 315 additions and 237 deletions


@ -4,9 +4,10 @@ go_import_path: go.etcd.io/bbolt
sudo: false
go:
- 1.12
- 1.15
before_install:
- go get -v golang.org/x/sys/unix
- go get -v honnef.co/go/tools/...
- go get -v github.com/kisielk/errcheck


@ -908,12 +908,14 @@ Below is a list of public, open source projects that use Bolt:
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB viewer that can run on Windows, Linux, and Android.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
simple tx and key scans.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
@ -938,9 +940,8 @@ Below is a list of public, open source projects that use Bolt:
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library.
* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.


@ -7,6 +7,8 @@ import (
"syscall"
"time"
"unsafe"
"golang.org/x/sys/unix"
)
// flock acquires an advisory lock on a file descriptor.
@ -55,7 +57,7 @@ func mmap(db *DB, sz int) error {
}
// Advise the kernel that the mmap is accessed randomly.
err = madvise(b, syscall.MADV_RANDOM)
err = unix.Madvise(b, syscall.MADV_RANDOM)
if err != nil && err != syscall.ENOSYS {
// Ignore not implemented error in kernel because it still works.
return fmt.Errorf("madvise: %s", err)
@ -82,12 +84,3 @@ func munmap(db *DB) error {
db.datasz = 0
return err
}
// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
if e1 != 0 {
err = e1
}
return
}


@ -1185,6 +1185,10 @@ func TestBucket_Put_ValueTooLarge(t *testing.T) {
// Ensure a bucket can calculate stats.
func TestBucket_Stats(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
db := MustOpenDB()
defer db.MustClose()


@ -1993,7 +1993,7 @@ func (cmd *CompactCommand) Run(args ...string) (err error) {
defer dst.Close()
// Run compaction.
if err := cmd.compact(dst, src); err != nil {
if err := bolt.Compact(dst, src, cmd.TxMaxSize); err != nil {
return err
}
@ -2009,114 +2009,6 @@ func (cmd *CompactCommand) Run(args ...string) (err error) {
return nil
}
func (cmd *CompactCommand) compact(dst, src *bolt.DB) error {
// commit regularly, or we'll run out of memory for large datasets if using one transaction.
var size int64
tx, err := dst.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
if err := cmd.walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
// On each key/value, check if we have exceeded tx size.
sz := int64(len(k) + len(v))
if size+sz > cmd.TxMaxSize && cmd.TxMaxSize != 0 {
// Commit previous transaction.
if err := tx.Commit(); err != nil {
return err
}
// Start new transaction.
tx, err = dst.Begin(true)
if err != nil {
return err
}
size = 0
}
size += sz
// Create bucket on the root transaction if this is the first level.
nk := len(keys)
if nk == 0 {
bkt, err := tx.CreateBucket(k)
if err != nil {
return err
}
if err := bkt.SetSequence(seq); err != nil {
return err
}
return nil
}
// Create buckets on subsequent levels, if necessary.
b := tx.Bucket(keys[0])
if nk > 1 {
for _, k := range keys[1:] {
b = b.Bucket(k)
}
}
// Fill the entire page for best compaction.
b.FillPercent = 1.0
// If there is no value then this is a bucket call.
if v == nil {
bkt, err := b.CreateBucket(k)
if err != nil {
return err
}
if err := bkt.SetSequence(seq); err != nil {
return err
}
return nil
}
// Otherwise treat it as a key/value pair.
return b.Put(k, v)
}); err != nil {
return err
}
return tx.Commit()
}
// walkFunc is the type of the function called for keys (buckets and "normal"
// values) discovered by Walk. keys is the list of keys to descend to the bucket
// owning the discovered key/value pair k/v.
type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error
// walk walks recursively the bolt database db, calling walkFn for each key it finds.
func (cmd *CompactCommand) walk(db *bolt.DB, walkFn walkFunc) error {
return db.View(func(tx *bolt.Tx) error {
return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
return cmd.walkBucket(b, nil, name, nil, b.Sequence(), walkFn)
})
})
}
func (cmd *CompactCommand) walkBucket(b *bolt.Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error {
// Execute callback.
if err := fn(keypath, k, v, seq); err != nil {
return err
}
// If this is not a bucket then stop.
if v != nil {
return nil
}
// Iterate over each child key/value.
keypath = append(keypath, k)
return b.ForEach(func(k, v []byte) error {
if v == nil {
bkt := b.Bucket(k)
return cmd.walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn)
}
return cmd.walkBucket(b, keypath, k, v, b.Sequence(), fn)
})
}
// Usage returns the help message.
func (cmd *CompactCommand) Usage() string {
return strings.TrimLeft(`

compact.go (new file, 114 lines)

@ -0,0 +1,114 @@
package bbolt
// Compact will create a copy of the source DB in the destination DB. This may
// reclaim space that the source database no longer has use for. txMaxSize can
// be used to limit the transaction size of this process and may trigger
// intermittent commits. A value of zero disables the size limit.
// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349
func Compact(dst, src *DB, txMaxSize int64) error {
// commit regularly, or we'll run out of memory for large datasets if using one transaction.
var size int64
tx, err := dst.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
// On each key/value, check if we have exceeded tx size.
sz := int64(len(k) + len(v))
if size+sz > txMaxSize && txMaxSize != 0 {
// Commit previous transaction.
if err := tx.Commit(); err != nil {
return err
}
// Start new transaction.
tx, err = dst.Begin(true)
if err != nil {
return err
}
size = 0
}
size += sz
// Create bucket on the root transaction if this is the first level.
nk := len(keys)
if nk == 0 {
bkt, err := tx.CreateBucket(k)
if err != nil {
return err
}
if err := bkt.SetSequence(seq); err != nil {
return err
}
return nil
}
// Create buckets on subsequent levels, if necessary.
b := tx.Bucket(keys[0])
if nk > 1 {
for _, k := range keys[1:] {
b = b.Bucket(k)
}
}
// Fill the entire page for best compaction.
b.FillPercent = 1.0
// If there is no value then this is a bucket call.
if v == nil {
bkt, err := b.CreateBucket(k)
if err != nil {
return err
}
if err := bkt.SetSequence(seq); err != nil {
return err
}
return nil
}
// Otherwise treat it as a key/value pair.
return b.Put(k, v)
}); err != nil {
return err
}
return tx.Commit()
}
// walkFunc is the type of the function called for keys (buckets and "normal"
// values) discovered by Walk. keys is the list of keys to descend to the bucket
// owning the discovered key/value pair k/v.
type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error
// walk walks recursively the bolt database db, calling walkFn for each key it finds.
func walk(db *DB, walkFn walkFunc) error {
return db.View(func(tx *Tx) error {
return tx.ForEach(func(name []byte, b *Bucket) error {
return walkBucket(b, nil, name, nil, b.Sequence(), walkFn)
})
})
}
func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error {
// Execute callback.
if err := fn(keypath, k, v, seq); err != nil {
return err
}
// If this is not a bucket then stop.
if v != nil {
return nil
}
// Iterate over each child key/value.
keypath = append(keypath, k)
return b.ForEach(func(k, v []byte) error {
if v == nil {
bkt := b.Bucket(k)
return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn)
}
return walkBucket(b, keypath, k, v, b.Sequence(), fn)
})
}
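A minimal sketch of how the newly exported API might be driven once this file lands; the database paths, file mode, and the 64 KiB txMaxSize below are illustrative assumptions, not part of the patch:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open the source database read-only; compaction only needs
	// View transactions on it.
	src, err := bolt.Open("source.db", 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	// The destination should be a fresh, empty database file.
	dst, err := bolt.Open("compacted.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Copy every bucket, sequence number, and key/value pair from src
	// into dst, committing whenever roughly 64 KiB has accumulated.
	if err := bolt.Compact(dst, src, 64*1024); err != nil {
		log.Fatal(err)
	}
}
```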


@ -66,6 +66,10 @@ func TestOpen(t *testing.T) {
// Regression validation for https://github.com/etcd-io/bbolt/pull/122.
// Tests multiple goroutines simultaneously opening a database.
func TestOpen_MultipleGoroutines(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
const (
instances = 30
iterations = 30
@ -1122,7 +1126,7 @@ func TestDB_Batch(t *testing.T) {
// Iterate over multiple updates in separate goroutines.
n := 2
ch := make(chan error)
ch := make(chan error, n)
for i := 0; i < n; i++ {
go func(i int) {
ch <- db.Batch(func(tx *bolt.Tx) error {
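The buffered-channel change above is a leak fix: with an unbuffered channel, any goroutine still trying to send after the test exits early (say, on a t.Fatal between receives) would block forever. A standalone sketch of the pattern, with plain errors standing in for the db.Batch results:

```go
package main

import "fmt"

func main() {
	n := 2
	ch := make(chan error, n) // capacity n: every send completes even if reads stop early
	for i := 0; i < n; i++ {
		go func(i int) {
			// Stand-in for ch <- db.Batch(func(tx *bolt.Tx) error { ... }).
			ch <- fmt.Errorf("result from goroutine %d", i)
		}(i)
	}
	// Even if this loop returned after the first receive, the remaining
	// sender could still deposit its value into the buffer and exit.
	for i := 0; i < n; i++ {
		fmt.Println(<-ch)
	}
}
```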


@ -2,7 +2,6 @@ package bbolt
import (
"fmt"
"reflect"
"sort"
"unsafe"
)
@ -94,24 +93,8 @@ func (f *freelist) pending_count() int {
return count
}
// copyallunsafe copies a list of all free ids and all pending ids in one sorted list.
// copyall copies a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyallunsafe(dstptr unsafe.Pointer) { // dstptr is []pgid data pointer
m := make(pgids, 0, f.pending_count())
for _, txp := range f.pending {
m = append(m, txp.ids...)
}
sort.Sort(m)
fpgids := f.getFreePageIDs()
sz := len(fpgids) + len(m)
dst := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
Data: uintptr(dstptr),
Len: sz,
Cap: sz,
}))
mergepgids(dst, fpgids, m)
}
func (f *freelist) copyall(dst []pgid) {
m := make(pgids, 0, f.pending_count())
for _, txp := range f.pending {
@ -284,21 +267,23 @@ func (f *freelist) read(p *page) {
}
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
var idx, count uintptr = 0, uintptr(p.count)
var idx, count = 0, int(p.count)
if count == 0xFFFF {
idx = 1
count = uintptr(*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))))
c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
count = int(c)
if count < 0 {
panic(fmt.Sprintf("leading element count %d overflows int", c))
}
}
// Copy the list of page ids from the freelist.
if count == 0 {
f.ids = nil
} else {
ids := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + idx*unsafe.Sizeof(pgid(0)),
Len: int(count),
Cap: int(count),
}))
var ids []pgid
data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx)
unsafeSlice(unsafe.Pointer(&ids), data, count)
// Copy the ids so we don't modify the freelist page directly.
idsCopy := make([]pgid, count)
@ -331,16 +316,22 @@ func (f *freelist) write(p *page) error {
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
lenids := f.count()
if lenids == 0 {
p.count = uint16(lenids)
} else if lenids < 0xFFFF {
p.count = uint16(lenids)
f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
l := f.count()
if l == 0 {
p.count = uint16(l)
} else if l < 0xFFFF {
p.count = uint16(l)
var ids []pgid
data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
unsafeSlice(unsafe.Pointer(&ids), data, l)
f.copyall(ids)
} else {
p.count = 0xFFFF
*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) = pgid(lenids)
f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + unsafe.Sizeof(pgid(0))))
var ids []pgid
data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
unsafeSlice(unsafe.Pointer(&ids), data, l+1)
ids[0] = pgid(l)
f.copyall(ids[1:])
}
return nil
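For intuition, the overflow convention described in the comment above fits in a few lines. This standalone sketch uses simplified types (an illustration only, not the actual bbolt page structs) to show which freelist lengths are stored directly in page.count and which fall back to the leading-element encoding:

```go
package main

import "fmt"

// encodeCount mirrors the freelist page rule: lengths below the uint16
// sentinel are stored directly in the count field; anything at or above
// 0xFFFF sets count to the sentinel, and the real length must then be
// written as the first pgid-sized element of the list.
func encodeCount(n int) (count uint16, useLeadingElement bool) {
	if n < 0xFFFF {
		return uint16(n), false
	}
	return 0xFFFF, true
}

func main() {
	for _, n := range []int{0, 1000, 0xFFFE, 0xFFFF, 100000} {
		c, overflow := encodeCount(n)
		fmt.Printf("len=%6d -> count=0x%04X leadingElement=%v\n", n, c, overflow)
	}
}
```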


@ -4,7 +4,7 @@ import "sort"
// hashmapFreeCount returns the count of free pages (hashmap version).
func (f *freelist) hashmapFreeCount() int {
// use the forwardmap to get the total count
// use the forwardMap to get the total count
count := 0
for _, size := range f.forwardMap {
count += int(size)
@ -41,7 +41,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
for pid := range bm {
// remove the initial
f.delSpan(pid, uint64(size))
f.delSpan(pid, size)
f.allocs[pid] = txid
@ -51,7 +51,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
f.addSpan(pid+pgid(n), remain)
for i := pgid(0); i < pgid(n); i++ {
delete(f.cache, pid+pgid(i))
delete(f.cache, pid+i)
}
return pid
}

go.mod (2 changed lines)

@ -2,4 +2,4 @@ module go.etcd.io/bbolt
go 1.12
require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
require golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d

go.sum (4 changed lines)

@ -1,2 +1,2 @@
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d h1:L/IKR6COd7ubZrs2oTnTi73IhgqJ71c9s80WsQnh0Es=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

manydbs_test.go (new file, 71 lines)

@ -0,0 +1,71 @@
package bbolt
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"testing"
)
func createDb(t *testing.T) (*DB, func()) {
// First, create a temporary directory to be used for the duration of
// this test.
tempDirName, err := ioutil.TempDir("", "bboltmemtest")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
path := filepath.Join(tempDirName, "testdb.db")
bdb, err := Open(path, 0600, nil)
if err != nil {
t.Fatalf("error creating bbolt db: %v", err)
}
cleanup := func() {
bdb.Close()
os.RemoveAll(tempDirName)
}
return bdb, cleanup
}
func createAndPutKeys(t *testing.T) {
t.Parallel()
db, cleanup := createDb(t)
defer cleanup()
bucketName := []byte("bucket")
for i := 0; i < 100; i++ {
err := db.Update(func(tx *Tx) error {
nodes, err := tx.CreateBucketIfNotExists(bucketName)
if err != nil {
return err
}
var key [16]byte
rand.Read(key[:])
if err := nodes.Put(key[:], nil); err != nil {
return err
}
return nil
})
if err != nil {
t.Fatal(err)
}
}
}
func TestManyDBs(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
for i := 0; i < 100; i++ {
t.Run(fmt.Sprintf("%d", i), createAndPutKeys)
}
}

node.go (25 changed lines)

@ -3,7 +3,6 @@ package bbolt
import (
"bytes"
"fmt"
"reflect"
"sort"
"unsafe"
)
@ -208,36 +207,32 @@ func (n *node) write(p *page) {
}
// Loop over each item and write it to the page.
bp := uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
// off tracks the offset into p of the start of the next data.
off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
for i, item := range n.inodes {
_assert(len(item.key) > 0, "write: zero-length inode key")
// Create a slice to write into of needed size and advance
// byte pointer for next iteration.
sz := len(item.key) + len(item.value)
b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
off += uintptr(sz)
// Write the page element.
if n.isLeaf {
elem := p.leafPageElement(uint16(i))
elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
elem.flags = item.flags
elem.ksize = uint32(len(item.key))
elem.vsize = uint32(len(item.value))
} else {
elem := p.branchPageElement(uint16(i))
elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
elem.ksize = uint32(len(item.key))
elem.pgid = item.pgid
_assert(elem.pgid != p.id, "write: circular dependency occurred")
}
// Create a slice to write into of needed size and advance
// byte pointer for next iteration.
klen, vlen := len(item.key), len(item.value)
sz := klen + vlen
b := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Data: bp,
Len: sz,
Cap: sz,
}))
bp += uintptr(sz)
// Write data for the element to the end of the page.
l := copy(b, item.key)
copy(b[l:], item.value)


@ -44,9 +44,9 @@ func TestNode_read_LeafPage(t *testing.T) {
nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4
// Write data for the nodes at the end.
data := (*[4096]byte)(unsafe.Pointer(&nodes[2]))
copy(data[:], "barfooz")
copy(data[7:], "helloworldbye")
const s = "barfoozhelloworldbye"
data := unsafeByteSlice(unsafe.Pointer(&nodes[2]), 0, 0, len(s))
copy(data, s)
// Deserialize page into a leaf.
n := &node{}

page.go (57 changed lines)

@ -3,7 +3,6 @@ package bbolt
import (
"fmt"
"os"
"reflect"
"sort"
"unsafe"
)
@ -51,13 +50,13 @@ func (p *page) typ() string {
// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
return (*meta)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
}
// leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
off := uintptr(index) * leafPageElementSize
return (*leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
leafPageElementSize, int(index)))
}
// leafPageElements retrieves a list of leaf nodes.
@ -65,17 +64,16 @@ func (p *page) leafPageElements() []leafPageElement {
if p.count == 0 {
return nil
}
return *(*[]leafPageElement)(unsafe.Pointer(&reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
Len: int(p.count),
Cap: int(p.count),
}))
var elems []leafPageElement
data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
return elems
}
// branchPageElement retrieves the branch node by index
func (p *page) branchPageElement(index uint16) *branchPageElement {
off := uintptr(index) * unsafe.Sizeof(branchPageElement{})
return (*branchPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
unsafe.Sizeof(branchPageElement{}), int(index)))
}
// branchPageElements retrieves a list of branch nodes.
@ -83,20 +81,15 @@ func (p *page) branchPageElements() []branchPageElement {
if p.count == 0 {
return nil
}
return *(*[]branchPageElement)(unsafe.Pointer(&reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
Len: int(p.count),
Cap: int(p.count),
}))
var elems []branchPageElement
data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
return elems
}
// dump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p)),
Len: n,
Cap: n,
}))
buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
fmt.Fprintf(os.Stderr, "%x\n", buf)
}
@ -115,11 +108,7 @@ type branchPageElement struct {
// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
Len: int(n.ksize),
Cap: int(n.ksize),
}))
return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
}
// leafPageElement represents a node on a leaf page.
@ -132,20 +121,16 @@ type leafPageElement struct {
// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
Len: int(n.ksize),
Cap: int(n.ksize),
}))
i := int(n.pos)
j := i + int(n.ksize)
return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}
// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos) + uintptr(n.ksize),
Len: int(n.vsize),
Cap: int(n.vsize),
}))
i := int(n.pos) + int(n.ksize)
j := i + int(n.vsize)
return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}
// PageInfo represents human readable information about a page.

tx.go (30 changed lines)

@ -4,7 +4,6 @@ import (
"fmt"
"io"
"os"
"reflect"
"sort"
"strings"
"time"
@ -189,7 +188,6 @@ func (tx *Tx) Commit() error {
}
// If strict mode is enabled then perform a consistency check.
// Only the first consistency error is reported in the panic.
if tx.db.StrictMode {
ch := tx.Check()
var errs []string
@ -394,7 +392,7 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
return err
}
err = tx.Copy(f)
_, err = tx.WriteTo(f)
if err != nil {
_ = f.Close()
return err
@ -524,24 +522,18 @@ func (tx *Tx) write() error {
// Write pages to disk in order.
for _, p := range pages {
size := (int(p.overflow) + 1) * tx.db.pageSize
rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize)
offset := int64(p.id) * int64(tx.db.pageSize)
var written uintptr
// Write out page in "max allocation" sized chunks.
ptr := uintptr(unsafe.Pointer(p))
for {
// Limit our write to our max allocation size.
sz := size
sz := rem
if sz > maxAllocSize-1 {
sz = maxAllocSize - 1
}
buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))
// Write chunk to disk.
buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Data: ptr,
Len: sz,
Cap: sz,
}))
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
return err
}
@ -550,14 +542,14 @@ func (tx *Tx) write() error {
tx.stats.Write++
// Exit inner for loop if we've written all the chunks.
size -= sz
if size == 0 {
rem -= sz
if rem == 0 {
break
}
// Otherwise move offset forward and move pointer to next chunk.
offset += int64(sz)
ptr += uintptr(sz)
written += uintptr(sz)
}
}
@ -576,11 +568,7 @@ func (tx *Tx) write() error {
continue
}
buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
Data: uintptr(unsafe.Pointer(p)),
Len: tx.db.pageSize,
Cap: tx.db.pageSize,
}))
buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
for i := range buf {

unsafe.go (new file, 39 lines)

@ -0,0 +1,39 @@
package bbolt
import (
"reflect"
"unsafe"
)
func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
return unsafe.Pointer(uintptr(base) + offset)
}
func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
}
func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
//
// This memory is not allocated from C, but it is unmanaged by Go's
// garbage collector and should behave similarly, and the compiler
// should produce similar code. Note that this conversion allows a
// subslice to begin after the base address, with an optional offset,
// while the URL above does not cover this case and only slices from
// index 0. However, the wiki never says that the address must be to
// the beginning of a C allocation (or even that malloc was used at
// all), so this is believed to be correct.
return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
}
// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by
// the slice parameter. This helper should be used over other direct
// manipulation of reflect.SliceHeader to prevent misuse, namely, converting
// from reflect.SliceHeader to a Go slice type.
func unsafeSlice(slice, data unsafe.Pointer, len int) {
s := (*reflect.SliceHeader)(slice)
s.Data = uintptr(data)
s.Cap = len
s.Len = len
}
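To make the new helpers concrete, here is a self-contained sketch that copies unsafeAdd and unsafeSlice locally (so it runs outside the package) and views part of an ordinary byte buffer as a typed slice without copying; the buffer and the uint32 element type are illustrative, not from the patch:

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset)
}

// unsafeSlice points an existing slice variable at unmanaged memory,
// exactly as the helper in unsafe.go does for freelist and page data.
func unsafeSlice(slice, data unsafe.Pointer, len int) {
	s := (*reflect.SliceHeader)(slice)
	s.Data = uintptr(data)
	s.Cap = len
	s.Len = len
}

func main() {
	// A plain buffer standing in for an mmap'd page.
	buf := make([]byte, 32)
	for i := range buf {
		buf[i] = byte(i)
	}

	// Reinterpret the second half of the buffer as four uint32s, no copy.
	var ids []uint32
	data := unsafeAdd(unsafe.Pointer(&buf[0]), 16)
	unsafeSlice(unsafe.Pointer(&ids), data, 4)
	fmt.Println(ids)
}
```

This matches the rationale in the file's own comment: mutate an existing slice header in place rather than constructing a reflect.SliceHeader value and converting it, which is the misuse the helper guards against.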