mirror of https://github.com/etcd-io/bbolt.git

Initial db.open.

parent df8333328f
commit ee24437bfc
@@ -0,0 +1,20 @@
PKG=./...
TEST=.
BENCH=.
COVERPROFILE=/tmp/c.out

bench: benchpreq
    go test -v -test.bench=$(BENCH) ./.bench

cover: fmt
    go test -coverprofile=$(COVERPROFILE) .
    go tool cover -html=$(COVERPROFILE)
    rm $(COVERPROFILE)

fmt:
    @go fmt ./...

test: fmt
    @go test -v -cover -test.run=$(TEST) $(PKG)

.PHONY: bench cover fmt test
31 cursor.go

@@ -11,7 +11,6 @@ package bolt

// TODO: #define MDB_NOSPILL 0x8000 /** Do not spill pages to disk if txn is getting full, may fail instead */


type Cursor interface {
    First() error
    FirstDup() error

@@ -32,13 +31,13 @@ type Cursor interface {

type cursor struct {
    flags int
    next *cursor
    _next *cursor
    backup *cursor
    xcursor *xcursor
    transaction *transaction
    bucketId int
    bucket *bucket
    bucketx *bucketx
    bucket *Bucket
    // bucketx *bucketx
    bucketFlag int
    snum int
    top int

@@ -48,8 +47,8 @@ type cursor struct {

type xcursor struct {
    cursor cursor
    bucket *bucket
    bucketx *bucketx
    bucket *Bucket
    // bucketx *bucketx
    bucketFlag int
}

@@ -59,7 +58,7 @@ type xcursor struct {
// P_DIRTY to set P_KEEP, P_DIRTY|P_KEEP to clear it.
// @param[in] all No shortcuts. Needed except after a full #mdb_page_flush().
// @return 0 on success, non-zero on failure.
func (c *cursor) xkeep(unsigned pflags, int all) int {
func (c *cursor) xkeep(pflags int, all int) error {
    /*
    enum { Mask = P_SUBP|P_DIRTY|P_KEEP };
    MDB_txn *txn = mc->mc_txn;

@@ -116,7 +115,7 @@ func (c *cursor) xkeep(unsigned pflags, int all) int {

    return rc;
    */
    return 0
    return nil
}

// Spill pages from the dirty list back to disk.

@@ -150,7 +149,7 @@ func (c *cursor) xkeep(unsigned pflags, int all) int {
// @param[in] key For a put operation, the key being stored.
// @param[in] data For a put operation, the data being stored.
// @return 0 on success, non-zero on failure.
func (c *cursor) spill(MDB_val *key, MDB_val *data) int {
func (c *cursor) spill(key []byte, data []byte) error {
    /*
    MDB_txn *txn = m0->mc_txn;
    MDB_page *dp;

@@ -397,7 +396,7 @@ fail:
    txn->mt_flags |= MDB_TXN_ERROR;
    return rc;
    */
    return 0
    return nil
}

// Copy the used portions of a non-overflow page.

@@ -748,6 +747,7 @@ func (c *cursor) searchLowest() error {
    return rc;
    return mdb_page_search_root(mc, NULL, MDB_PS_FIRST);
    */
    return nil
}

// Search for the page a given key should be in.

@@ -906,7 +906,6 @@ release:
    return nil
}


// Find a sibling for a page.
// Replaces the page at the top of the cursor's stack with the
// specified sibling, if one exists.

@@ -1120,6 +1119,7 @@ func (c *cursor) prev(key []byte, data []byte, op int) error {
    MDB_GET_KEY(leaf, key);
    return MDB_SUCCESS;
    */
    return nil
}

// Set the cursor on a specific data item.

@@ -1306,7 +1306,7 @@ set1:
    return rc;
    */

    return nil
    return nil, false
}

// Move the cursor to the first item in the database.

@@ -1356,6 +1356,7 @@ func (c *cursor) first(key []byte, data []byte) error {

// Move the cursor to the last item in the database.
func (c *cursor) last() ([]byte, []byte) {
    /*
    int rc;
    MDB_node *leaf;

@@ -1396,6 +1397,8 @@ func (c *cursor) last() ([]byte, []byte) {

    MDB_GET_KEY(leaf, key);
    return MDB_SUCCESS;
    */
    return nil, nil
}

func (c *cursor) Get(key []byte, data []byte, op int) ([]byte, []byte, error) {

@@ -2453,7 +2456,7 @@ func (c *cursor) xcursor_init1(n *node) {
}

// Initialize a cursor for a given transaction and database.
func (c *cursor) init(t *transaction, bucket *bucket, mx *xcursor) {
func (c *cursor) init(t *transaction, bucket *Bucket, mx *xcursor) {
    /*
    mc->mc_next = NULL;
    mc->mc_backup = NULL;

@@ -2527,7 +2530,7 @@ func (c *cursor) Transaction() Transaction {
    return nil
}

func (c *cursor) Bucket() Bucket {
func (c *cursor) Bucket() *Bucket {
    return c.bucket
}
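A note on the cursor struct above: the snum/top fields carry a stack of pages from the root down to the cursor's current position, the way LMDB's MDB_cursor does. Purely as an illustration of that bookkeeping — none of these names come from the commit, and the fixed depth is an assumption — a minimal page stack could look like:

package main

import "fmt"

// page is a stand-in for the on-disk page type used elsewhere in this diff.
type page struct{ id uint64 }

// pageStack mirrors the snum/top bookkeeping on cursor: a fixed-depth stack
// of the pages visited on the way from the root to the current leaf.
type pageStack struct {
    pages [32]*page // assumed maximum tree depth
    snum  int       // number of pushed pages
}

// push records a page while descending the tree.
func (s *pageStack) push(p *page) {
    s.pages[s.snum] = p
    s.snum++
}

// top returns the page the cursor is currently positioned on.
func (s *pageStack) top() *page {
    if s.snum == 0 {
        return nil
    }
    return s.pages[s.snum-1]
}

// pop moves back up one level, e.g. when walking to a sibling page.
func (s *pageStack) pop() *page {
    p := s.top()
    if p != nil {
        s.snum--
    }
    return p
}

func main() {
    var s pageStack
    s.push(&page{id: 0}) // root
    s.push(&page{id: 7}) // leaf
    fmt.Println(s.top().id, s.pop().id, s.snum)
}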
382 db.go

@@ -1,5 +1,12 @@
package bolt

import (
    "os"
    "sync"
    "syscall"
    "unsafe"
)

const (
    NoSync = iota
    NoMetaSync

@@ -8,59 +15,54 @@ const (
    IntegerDupKey
)

var DatabaseAlreadyOpenedError = &Error{"Database already open"}
var DatabaseAlreadyOpenedError = &Error{"Database already open", nil}

// TODO: #define MDB_FATAL_ERROR 0x80000000U /** Failed to update the meta page. Probably an I/O error. */
// TODO: #define MDB_ENV_ACTIVE 0x20000000U /** Some fields are initialized. */
// TODO: #define MDB_ENV_TXKEY 0x10000000U /** me_txkey is set */
// TODO: #define MDB_LIVE_READER 0x08000000U /** Have liveness lock in reader table */

type DB interface {
    syncEnabled bool
    metaSyncEnabled bool
}

type db struct {
type DB struct {
    sync.Mutex
    opened bool

    file os.File
    metafile os.File
    file *os.File
    metafile *os.File
    data []byte
    buf []byte
    meta0 *meta
    meta1 *meta

    pageSize int
    readers []*reader
    buckets []*bucket
    xbuckets []*bucketx /**< array of static DB info */
    buckets []*Bucket
    // xbuckets []*bucketx /**< array of static DB info */
    bucketFlags []int /**< array of flags from MDB_db.md_flags */
    path string
    mmap []byte
    mmapSize int /**< size of the data memory map */
    size int /**< current file size */
    meta1 []byte
    meta2 []byte
    pbuf []byte
    transaction *transaction /**< current write transaction */
    maxPageNumber int /**< me_mapsize / me_psize */
    pageState pageStage /**< state of old pages from freeDB */
    pageState pageState /**< state of old pages from freeDB */
    dpages []*page /**< list of malloc'd blocks for re-use */
    freePages []int /** IDL of pages that became unused in a write txn */
    dirtyPages []int /** ID2L of pages written during a write txn. Length MDB_IDL_UM_SIZE. */
    maxFreeOnePage int /** Max number of freelist items that can fit in a single overflow page */
    maxPageDataSize int
    maxNodeSize int /** Max size of a node on a page */
    maxKeySize int /**< max size of a key */
}


func NewDB() DB {
    return &db{}
func NewDB() *DB {
    return &DB{}
}

func (db *db) Path() string {
func (db *DB) Path() string {
    return db.path
}

func (db *db) Open(path string, mode os.FileMode) error {
func (db *DB) Open(path string, mode os.FileMode) error {
    var err error
    db.Lock()
    defer db.Unlock()
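The new DB struct embeds sync.Mutex, and Open holds that lock for its whole duration. A minimal standalone sketch of just that guard pattern — the Open body here is illustrative only, not the commit's code:

package main

import (
    "fmt"
    "sync"
)

// DB is a cut-down stand-in for the struct in this diff.
type DB struct {
    sync.Mutex
    opened bool
    path   string
}

// Open marks the database as open; the embedded mutex serializes concurrent
// Open/Close calls, matching db.Lock()/defer db.Unlock() above.
func (db *DB) Open(path string) error {
    db.Lock()
    defer db.Unlock()

    if db.opened {
        return fmt.Errorf("database already open")
    }
    db.path = path
    db.opened = true
    return nil
}

func main() {
    db := &DB{}
    fmt.Println(db.Open("/tmp/test.db")) // <nil>
    fmt.Println(db.Open("/tmp/test.db")) // database already open
}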
@@ -72,24 +74,24 @@ func (db *db) Open(path string, mode os.FileMode) error {

    // Open data file and separate sync handler for metadata writes.
    db.path = path
    if db.file, err = os.OpenFile(db.path, O_RDWR | O_CREAT, mode); err != nil {
    if db.file, err = os.OpenFile(db.path, os.O_RDWR|os.O_CREATE, mode); err != nil {
        db.close()
        return err
    }
    if db.metafile, err = os.OpenFile(db.path, O_RDWR | O_SYNC, mode); err != nil {
    if db.metafile, err = os.OpenFile(db.path, os.O_RDWR|os.O_SYNC, mode); err != nil {
        db.close()
        return err
    }

    // Read enough data to get both meta pages.
    var m, m0, m1 *meta
    var buf [headerSize + unsafe.Sizeof(meta)]byte
    if _, err := db.file.ReadAt(buf, 0); err == nil {
    var buf [pageHeaderSize + int(unsafe.Sizeof(meta{}))]byte
    if _, err := db.file.ReadAt(buf[:], 0); err == nil {
        if m0, _ = db.page(buf[:], 0).meta(); m0 != nil {
            db.pageSize = m0.free.pad
            db.pageSize = int(m0.free.pad)
        }
    }
    if _, err := db.file.ReadAt(buf, db.pageSize); err == nil {
    if _, err := db.file.ReadAt(buf[:], int64(db.pageSize)); err == nil {
        m1, _ = db.page(buf[:], 0).meta()
    }
    if m0 != nil && m1 != nil {

@@ -102,27 +104,16 @@ func (db *db) Open(path string, mode os.FileMode) error {

    // Initialize the page size for new environments.
    if m == nil {
        db.pageSize = os.Getpagesize()
        if db.pageSize > maxPageSize {
            db.pageSize = maxPageSize
        if err := db.init(); err != nil {
            return err
        }
    }

    // TODO: Check mapsize.
    /*
    // Was a mapsize configured?
    if (!env->me_mapsize) {
        // If this is a new environment, take the default,
        // else use the size recorded in the existing env.
        env->me_mapsize = newenv ? DEFAULT_MAPSIZE : meta.mm_mapsize;
    } else if (env->me_mapsize < meta.mm_mapsize) {
        // If the configured size is smaller, make sure it's
        // still big enough. Silently round up to minimum if not.
        size_t minsize = (meta.mm_last_pg + 1) * meta.mm_psize;
        if (env->me_mapsize < minsize)
            env->me_mapsize = minsize;
    }
    */
    // Initialize db fields.
    db.buf = make([]byte, db.pageSize)
    db.maxPageDataSize = ((db.pageSize - pageHeaderSize) / int(unsafe.Sizeof(pgno(0)))) - 1
    db.maxNodeSize = (((db.pageSize - pageHeaderSize) / minKeyCount) & -2) - int(unsafe.Sizeof(indx(0)))
    // TODO?: env->me_maxpg = env->me_mapsize / env->me_psize;

    // Memory map the data file.
    if err := db.mmap(); err != nil {
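Opening the same path twice — once normally, once with os.O_SYNC — gives Open a dedicated handle whose writes go straight to disk for meta pages, while normal page I/O stays buffered. A standalone sketch of that file setup and of probing the first page with ReadAt (the path and page size are assumptions, not values from the commit):

package main

import (
    "fmt"
    "os"
)

func main() {
    const pageSize = 4096 // assumed; the real code asks the OS

    // Data handle for normal page reads and writes.
    f, err := os.OpenFile("/tmp/demo.db", os.O_RDWR|os.O_CREATE, 0666)
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // Separate handle opened with O_SYNC so meta-page writes hit disk.
    mf, err := os.OpenFile("/tmp/demo.db", os.O_RDWR|os.O_SYNC, 0666)
    if err != nil {
        panic(err)
    }
    defer mf.Close()

    // Read the first page; a short read just means the file is new.
    buf := make([]byte, pageSize)
    if n, err := f.ReadAt(buf, 0); err == nil {
        fmt.Println("read page 0:", n, "bytes")
    } else {
        fmt.Println("new or truncated file:", err)
    }
}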
@@ -130,94 +121,87 @@ func (db *db) Open(path string, mode os.FileMode) error {
        return err
    }

    // Initialize the buffer.
    db.buf = make([]byte, db.pageSize)
    // TODO: Initialize meta.
    // if (newenv) {
    //     i = mdb_env_init_meta(env, &meta);
    //     if (i != MDB_SUCCESS) {
    //         return i;
    //     }
    // }

    // Mark the database as opened and return.
    db.opened = true
    return nil
}

// Read the meta pages and return the latest.
func (db *db) readMeta() *meta {
    m := &meta{}
    m.read()
// int mdb_env_map(MDB_env *env, void *addr, int newsize)
func (db *DB) mmap() error {
    var err error

    /*
    if ((i = mdb_env_read_header(env, &meta)) != 0) {
        if (i != ENOENT)
            return i;
        DPUTS("new mdbenv");
        newenv = 1;
        env->me_psize = env->me_os_psize;
        if (env->me_psize > MAX_PAGESIZE)
            env->me_psize = MAX_PAGESIZE;
    // Determine the map size based on the file size.
    var size int
    if info, err := os.Stat(db.file.Name()); err != nil {
        return err
    } else if info.Size() < int64(db.pageSize*2) {
        return &Error{"file size too small", nil}
    } else {
        env->me_psize = meta.mm_psize;
        size = int(info.Size())
    }


    rc = mdb_env_map(env, meta.mm_address, newenv);
    if (rc)
        return rc;

    if (newenv) {
        if (flags & MDB_FIXEDMAP)
            meta.mm_address = env->me_map;
        i = mdb_env_init_meta(env, &meta);
        if (i != MDB_SUCCESS) {
            return i;
        }
    // Memory-map the data file as a byte slice.
    if db.data, err = syscall.Mmap(int(db.file.Fd()), 0, size, syscall.PROT_READ, syscall.MAP_SHARED); err != nil {
        return err
    }

    env->me_maxfree_1pg = (env->me_psize - PAGEHDRSZ) / sizeof(pgno_t) - 1;
    env->me_nodemax = (((env->me_psize - PAGEHDRSZ) / MDB_MINKEYS) & -2)
        - sizeof(indx_t);
    #if !(MDB_MAXKEYSIZE)
    env->me_maxkey = env->me_nodemax - (NODESIZE + sizeof(MDB_db));
    #endif
    env->me_maxpg = env->me_mapsize / env->me_psize;
    // TODO?: If nordahead, then: madvise(env->me_map, env->me_mapsize, MADV_RANDOM);

    #if MDB_DEBUG
    {
        int toggle = mdb_env_pick_meta(env);
        MDB_db *db = &env->me_metas[toggle]->mm_dbs[MAIN_DBI];

        DPRINTF(("opened database version %u, pagesize %u",
            env->me_metas[0]->mm_version, env->me_psize));
        DPRINTF(("using meta page %d", toggle));
        DPRINTF(("depth: %u", db->md_depth));
        DPRINTF(("entries: %"Z"u", db->md_entries));
        DPRINTF(("branch pages: %"Z"u", db->md_branch_pages));
        DPRINTF(("leaf pages: %"Z"u", db->md_leaf_pages));
        DPRINTF(("overflow pages: %"Z"u", db->md_overflow_pages));
        DPRINTF(("root: %"Z"u", db->md_root));
    // Save references to the meta pages.
    if db.meta0, err = db.page(db.data, 0).meta(); err != nil {
        return &Error{"meta0 error", err}
    }
    if db.meta1, err = db.page(db.data, 1).meta(); err != nil {
        return &Error{"meta1 error", err}
    }
    #endif

    return MDB_SUCCESS;
    */
    return nil
}

// page retrieves a page reference from a given byte array based on the current page size.
func (db *db) page(b []byte, id int) *page {
    return (*page)(unsafe.Pointer(b[id * db.pageSize]))
// init creates a new database file and initializes its meta pages.
func (db *DB) init() error {
    // Set the page size to the OS page size unless that is larger than max page size.
    db.pageSize = os.Getpagesize()
    if db.pageSize > maxPageSize {
        db.pageSize = maxPageSize
    }

    // Create two meta pages on a buffer.
    buf := make([]byte, db.pageSize*2)
    for i := 0; i < 2; i++ {
        p := db.page(buf[:], i)
        p.id = pgno(i)
        p.initMeta(db.pageSize)
    }

    // Write the buffer to our data file.
    if _, err := db.metafile.WriteAt(buf, 0); err != nil {
        return err
    }

    return nil
}

func (db *DB) close() {
    // TODO
}

// page retrieves a page reference from a given byte array based on the current page size.
func (db *DB) page(b []byte, id int) *page {
    return (*page)(unsafe.Pointer(&b[id*db.pageSize]))
}

// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ CONVERTED ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ //


func (db *db) freePage(p *page) {
func (db *DB) freePage(p *page) {
    /*
    mp->mp_next = env->me_dpages;
    VGMEMP_FREE(env, mp);
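db.mmap maps the whole data file read-only with syscall.Mmap and then reinterprets offsets into that byte slice as pages via unsafe, which is what db.page does. A self-contained sketch of the same mechanics on an ordinary file — the struct layout and sizes are simplified assumptions, not this commit's types:

package main

import (
    "fmt"
    "os"
    "syscall"
    "unsafe"
)

// header stands in for the idea of a page header cast out of raw bytes.
type header struct {
    id    uint64
    flags uint16
}

func main() {
    f, err := os.Open("/tmp/demo.db")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    info, err := f.Stat()
    if err != nil {
        panic(err)
    }
    if info.Size() < 16 {
        panic("need a file with at least 16 bytes")
    }

    // Read-only, shared mapping of the entire file, as in db.mmap above.
    data, err := syscall.Mmap(int(f.Fd()), 0, int(info.Size()),
        syscall.PROT_READ, syscall.MAP_SHARED)
    if err != nil {
        panic(err)
    }
    defer syscall.Munmap(data)

    // Reinterpret the start of the mapping as a header, like db.page().
    h := (*header)(unsafe.Pointer(&data[0]))
    fmt.Printf("first bytes as header: id=%d flags=%#x\n", h.id, h.flags)
}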
@@ -225,7 +209,7 @@ func (db *db) freePage(p *page) {
    */
}

func (db *db) freeDirtyPage(p *page) {
func (db *DB) freeDirtyPage(p *page) {
    /*
    if (!IS_OVERFLOW(dp) || dp->mp_pages == 1) {
        mdb_page_free(env, dp);

@@ -237,7 +221,7 @@ func (db *db) freeDirtyPage(p *page) {
    */
}

func (db *db) freeAllDirtyPages(p *page) {
func (db *DB) freeAllDirtyPages(p *page) {
    /*
    MDB_env *env = txn->mt_env;
    MDB_ID2L dl = txn->mt_u.dirty_list;

@@ -250,7 +234,7 @@ func (db *db) freeAllDirtyPages(p *page) {
    */
}

func (db *db) sync(force bool) error {
func (db *DB) sync(force bool) error {
    /*
    int rc = 0;
    if (force || !F_ISSET(env->me_flags, MDB_NOSYNC)) {

@@ -273,7 +257,7 @@ func (db *db) sync(force bool) error {
    return nil
}

func (db *db) Transaction(parent *transaction, flags int) (*transaction, error) {
func (db *DB) Transaction(parent *transaction, flags int) (*transaction, error) {
    /*
    MDB_txn *txn;
    MDB_ntxn *ntxn;
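db.sync mirrors mdb_env_sync: unless the NoSync option is in effect, force the data file to stable storage. A hedged sketch of what that tends to look like with os.File.Sync — the flag handling here is an assumption, since the commit still keeps the C body commented out:

package main

import "os"

// DB is a reduced stand-in; noSync mimics the NoSync option from the const
// block at the top of db.go.
type DB struct {
    file   *os.File
    noSync bool
}

// sync flushes the data file unless syncing is disabled, or always when forced.
func (db *DB) sync(force bool) error {
    if force || !db.noSync {
        return db.file.Sync()
    }
    return nil
}

func main() {
    f, err := os.CreateTemp("", "bolt-sync-")
    if err != nil {
        panic(err)
    }
    defer os.Remove(f.Name())
    defer f.Close()

    db := &DB{file: f}
    _ = db.sync(true)
}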
@@ -367,80 +351,20 @@ func (db *db) Transaction(parent *transaction, flags int) (*transaction, error)

    return rc;
    */
    return nil
}

// Write the environment parameters of a freshly created DB environment.
// @param[in] env the environment handle
// @param[out] meta address of where to store the meta information
// @return 0 on success, non-zero on failure.
func (db *db) initMeta(meta *meta) error {
    /*
    MDB_page *p, *q;
    int rc;
    unsigned int psize;
    #ifdef _WIN32
    DWORD len;
    OVERLAPPED ov;
    memset(&ov, 0, sizeof(ov));
    #define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \
        ov.Offset = pos; \
        rc = WriteFile(fd, ptr, size, &len, &ov); } while(0)
    #else
    int len;
    #define DO_PWRITE(rc, fd, ptr, size, len, pos) do { \
        len = pwrite(fd, ptr, size, pos); \
        rc = (len >= 0); } while(0)
    #endif

    DPUTS("writing new meta page");

    psize = env->me_psize;

    meta->mm_magic = MDB_MAGIC;
    meta->mm_version = MDB_DATA_VERSION;
    meta->mm_mapsize = env->me_mapsize;
    meta->mm_psize = psize;
    meta->mm_last_pg = 1;
    meta->mm_flags = env->me_flags & 0xffff;
    meta->mm_flags |= MDB_INTEGERKEY;
    meta->mm_dbs[0].md_root = P_INVALID;
    meta->mm_dbs[1].md_root = P_INVALID;

    p = calloc(2, psize);
    p->mp_pgno = 0;
    p->mp_flags = P_META;
    *(MDB_meta *)METADATA(p) = *meta;

    q = (MDB_page *)((char *)p + psize);
    q->mp_pgno = 1;
    q->mp_flags = P_META;
    *(MDB_meta *)METADATA(q) = *meta;

    DO_PWRITE(rc, env->me_fd, p, psize * 2, len, 0);
    if (!rc)
        rc = ErrCode();
    else if ((unsigned) len == psize * 2)
        rc = MDB_SUCCESS;
    else
        rc = ENOSPC;
    free(p);
    return rc;
    */
    return nil
    return nil, nil
}

// Check both meta pages to see which one is newer.
// @param[in] env the environment handle
// @return meta toggle (0 or 1).
func (db *db) pickMeta() int {
func (db *DB) pickMeta() int {
    /*
    return (env->me_metas[0]->mm_txnid < env->me_metas[1]->mm_txnid);
    */
    return 0
}

func (db *db) Create() error {
func (db *DB) Create() error {
    /*
    MDB_env *e;
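pickMeta keeps LMDB's double-buffered meta pages: whichever of the two pages carries the higher transaction id is the current one. A small sketch under that assumption (the Go body is still a stub in this commit):

package main

import "fmt"

// meta is reduced to the one field pickMeta cares about.
type meta struct {
    txnid int
}

// pickMeta returns 0 or 1: the index of the meta page written last,
// mirroring the mm_txnid comparison kept in the C comment above.
func pickMeta(meta0, meta1 *meta) int {
    if meta1.txnid > meta0.txnid {
        return 1
    }
    return 0
}

func main() {
    fmt.Println(pickMeta(&meta{txnid: 4}, &meta{txnid: 5})) // 1
    fmt.Println(pickMeta(&meta{txnid: 6}, &meta{txnid: 5})) // 0
}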
@@ -466,81 +390,7 @@ func (db *db) Create() error {
    return nil
}

// int mdb_env_map(MDB_env *env, void *addr, int newsize)
func (db *db) mmap(newsize int) error {
    /*
    MDB_page *p;
    unsigned int flags = env->me_flags;
    #ifdef _WIN32
    int rc;
    HANDLE mh;
    LONG sizelo, sizehi;
    sizelo = env->me_mapsize & 0xffffffff;
    sizehi = env->me_mapsize >> 16 >> 16; // only needed on Win64

    // Windows won't create mappings for zero length files.
    // Just allocate the maxsize right now.
    if (newsize) {
        if (SetFilePointer(env->me_fd, sizelo, &sizehi, 0) != (DWORD)sizelo
            || !SetEndOfFile(env->me_fd)
            || SetFilePointer(env->me_fd, 0, NULL, 0) != 0)
            return ErrCode();
    }
    mh = CreateFileMapping(env->me_fd, NULL, flags & MDB_WRITEMAP ?
        PAGE_READWRITE : PAGE_READONLY,
        sizehi, sizelo, NULL);
    if (!mh)
        return ErrCode();
    env->me_map = MapViewOfFileEx(mh, flags & MDB_WRITEMAP ?
        FILE_MAP_WRITE : FILE_MAP_READ,
        0, 0, env->me_mapsize, addr);
    rc = env->me_map ? 0 : ErrCode();
    CloseHandle(mh);
    if (rc)
        return rc;
    #else
    int prot = PROT_READ;
    if (flags & MDB_WRITEMAP) {
        prot |= PROT_WRITE;
        if (ftruncate(env->me_fd, env->me_mapsize) < 0)
            return ErrCode();
    }
    env->me_map = mmap(addr, env->me_mapsize, prot, MAP_SHARED,
        env->me_fd, 0);
    if (env->me_map == MAP_FAILED) {
        env->me_map = NULL;
        return ErrCode();
    }

    if (flags & MDB_NORDAHEAD) {
        // Turn off readahead. It's harmful when the DB is larger than RAM.
    #ifdef MADV_RANDOM
        madvise(env->me_map, env->me_mapsize, MADV_RANDOM);
    #else
    #ifdef POSIX_MADV_RANDOM
        posix_madvise(env->me_map, env->me_mapsize, POSIX_MADV_RANDOM);
    #endif // POSIX_MADV_RANDOM
    #endif // MADV_RANDOM
    }
    #endif // _WIN32

    // Can happen because the address argument to mmap() is just a
    // hint. mmap() can pick another, e.g. if the range is in use.
    // The MAP_FIXED flag would prevent that, but then mmap could
    // instead unmap existing pages to make room for the new map.
    if (addr && env->me_map != addr)
        return EBUSY; // TODO: Make a new MDB_* error code?

    p = (MDB_page *)env->me_map;
    env->me_metas[0] = METADATA(p);
    env->me_metas[1] = (MDB_meta *)((char *)env->me_metas[0] + env->me_psize);

    return MDB_SUCCESS;
    */
    return nil
}

func (db *db) setMapSize(size int) error {
func (db *DB) setMapSize(size int) error {
    /*
    // If env is already open, caller is responsible for making
    // sure there are no active txns.
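The C body removed above also grows the file to the configured map size with ftruncate before mapping it writable. The Go counterpart is os.File.Truncate; a tiny sketch, with an arbitrary assumed map size (nothing here is taken from the commit):

package main

import (
    "fmt"
    "os"
)

func main() {
    const mapSize = 1 << 20 // assumed 1MB map size

    f, err := os.OpenFile("/tmp/demo.db", os.O_RDWR|os.O_CREATE, 0666)
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // Grow the file so a fixed-size writable mapping has real backing,
    // the same role ftruncate plays in the C code above.
    if err := f.Truncate(mapSize); err != nil {
        panic(err)
    }

    info, err := f.Stat()
    if err != nil {
        panic(err)
    }
    fmt.Println("file size:", info.Size())
}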
@@ -573,7 +423,7 @@ func (db *db) setMapSize(size int) error {
    return nil
}

func (db *db) setMaxBucketCount(count int) error {
func (db *DB) setMaxBucketCount(count int) error {
    /*
    if (env->me_map)
        return EINVAL;

@@ -583,16 +433,17 @@ func (db *db) setMaxBucketCount(count int) error {
    return nil
}

func (db *db) setMaxReaderCount(count int) error {
func (db *DB) setMaxReaderCount(count int) error {
    /*
    if (env->me_map || readers < 1)
        return EINVAL;
    env->me_maxreaders = readers;
    return MDB_SUCCESS;
    */
    return nil
}

func (db *db) getMaxReaderCount(count int) (int, error) {
func (db *DB) getMaxReaderCount(count int) (int, error) {
    /*
    if (!env || !readers)
        return EINVAL;

@@ -602,9 +453,8 @@ func (db *db) getMaxReaderCount(count int) (int, error) {
    return 0, nil
}


// Destroy resources from mdb_env_open(), clear our readers & DBIs
func (db *db) close0(excl) {
func (db *DB) close0(excl int) {
    /*
    int i;

@@ -688,7 +538,7 @@ func (db *db) close0(excl) {
    */
}

func (db *db) copyfd(handle int) error {
func (db *DB) copyfd(handle int) error {
    /*
    MDB_txn *txn = NULL;
    int rc;
@@ -833,7 +683,7 @@ leave:
    return nil
}

func (db *db) Close() {
func (db *DB) Close() {
    /*
    MDB_page *dp;

@@ -862,7 +712,7 @@ func (db *db) Close() {
// @param[in] key The key for the node.
// @param[in] data The data for the node.
// @return The number of bytes needed to store the node.
func (db *db) LeafSize(key []byte, data []byte) int {
func (db *DB) LeafSize(key []byte, data []byte) int {
    /*
    size_t sz;

@@ -886,7 +736,7 @@ func (db *db) LeafSize(key []byte, data []byte) int {
// @param[in] env The environment handle.
// @param[in] key The key for the node.
// @return The number of bytes needed to store the node.
func (db *db) BranchSize(key []byte) int {
func (db *DB) BranchSize(key []byte) int {
    /*
    size_t sz;

@@ -902,7 +752,7 @@ func (db *db) BranchSize(key []byte) int {
    return 0
}

func (db *db) SetFlags(flag int, onoff bool) error {
func (db *DB) SetFlags(flag int, onoff bool) error {
    /*
    if ((flag & CHANGEABLE) != flag)
        return EINVAL;

@@ -915,12 +765,7 @@ func (db *db) SetFlags(flag int, onoff bool) error {
    return nil
}

func (db *db) Flags() int {
    return db.flags
}


func (db *db) Stat() *Stat
func (db *DB) Stat() *Stat {
    /*
    int toggle;
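LeafSize and BranchSize, once ported, only have to add up a fixed per-node header and the key/data lengths. A hedged sketch of that arithmetic — the header size constant is an assumption for illustration, not a value from this commit:

package main

import "fmt"

// nodeHeaderSize is an assumed fixed per-node overhead (flags, key size, data size).
const nodeHeaderSize = 16

// leafSize is the space a key/value pair needs on a leaf page.
func leafSize(key, data []byte) int {
    return nodeHeaderSize + len(key) + len(data)
}

// branchSize is the space a key needs on a branch page (no data).
func branchSize(key []byte) int {
    return nodeHeaderSize + len(key)
}

func main() {
    fmt.Println(leafSize([]byte("foo"), []byte("bar"))) // 22
    fmt.Println(branchSize([]byte("foo")))              // 19
}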
@@ -939,9 +784,10 @@ func (db *db) Stat() *Stat
    //return mdb_stat0(env, &env->me_metas[toggle]->mm_dbs[MAIN_DBI], stat);
    return stat
    */
    return nil
}

func (db *db) Info() *Info {
func (db *DB) Info() *Info {
    /*
    int toggle;

@@ -965,7 +811,7 @@ func (db *db) Info() *Info {
}

// TODO: Move to bucket.go
func (db *db) CloseBucket(b Bucket) {
func (db *DB) CloseBucket(b Bucket) {
    /*
    char *ptr;
    if (dbi <= MAIN_DBI || dbi >= env->me_maxdbs)

@@ -979,7 +825,7 @@ func (db *db) CloseBucket(b Bucket) {
}

//int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx)
func (db *db) getReaderList() error {
func (db *DB) getReaderList() error {
    /*
    unsigned int i, rdrs;
    MDB_reader *mr;

@@ -1019,7 +865,7 @@ func (db *db) getReaderList() error {
}

// (bool return is whether reader is dead)
func (db *db) checkReaders() (bool, error) {
func (db *DB) checkReaders() (bool, error) {
    /*
    unsigned int i, j, rdrs;
    MDB_reader *mr;
@@ -0,0 +1,27 @@
package bolt

import (
    "io/ioutil"
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestDBOpen(t *testing.T) {
    withDB(func(db *DB, path string) {
        err := db.Open(path, 0666)
        assert.NoError(t, err)
    })
}

func withDB(fn func(*DB, string)) {
    f, _ := ioutil.TempFile("", "bolt-")
    path := f.Name()
    f.Close()
    os.Remove(path)
    defer os.RemoveAll(path)

    db := NewDB()
    fn(db, path)
}
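The withDB helper hands each test a fresh path by creating and immediately removing a temp file. Purely as an aside, not part of this commit: a common variant of the same idea leans on the testing package for cleanup, for example:

package bolt_test

import (
    "path/filepath"
    "testing"
)

// tempPath returns a database path inside a per-test directory that the
// testing package deletes automatically when the test finishes.
func tempPath(t *testing.T) string {
    t.Helper()
    return filepath.Join(t.TempDir(), "bolt.db")
}

func TestTempPath(t *testing.T) {
    if tempPath(t) == "" {
        t.Fatal("expected a non-empty path")
    }
}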
42 error.go

@@ -1,30 +1,34 @@
package bolt

var (
    KeyExistError = &Error{"Key/data pair already exists"}
    NotFoundError = &Error{"No matching key/data pair found"}
    PageNotFoundError = &Error{"Requested page not found"}
    CorruptedError = &Error{"Located page was wrong type"}
    PanicError = &Error{"Update of meta page failed"}
    VersionMismatchError = &Error{"Database environment version mismatch"}
    InvalidError = &Error{"File is not an MDB file"}
    MapFullError = &Error{"Environment mapsize limit reached"}
    BucketFullError = &Error{"Environment maxdbs limit reached"}
    ReadersFullError = &Error{"Environment maxreaders limit reached"}
    TransactionFullError = &Error{"Transaction has too many dirty pages - transaction too big"}
    CursorFullError = &Error{"Internal error - cursor stack limit reached"}
    PageFullError = &Error{"Internal error - page has no more space"}
    MapResizedError = &Error{"Database contents grew beyond environment mapsize"}
    IncompatibleError = &Error{"Operation and DB incompatible, or DB flags changed"}
    BadReaderSlotError = &Error{"Invalid reuse of reader locktable slot"}
    BadTransactionError = &Error{"Transaction cannot recover - it must be aborted"}
    BadValueSizeError = &Error{"Too big key/data, key is empty, or wrong DUPFIXED size"}
    KeyExistError = &Error{"key/data pair already exists", nil}
    NotFoundError = &Error{"no matching key/data pair found", nil}
    PageNotFoundError = &Error{"requested page not found", nil}
    CorruptedError = &Error{"located page was wrong type", nil}
    PanicError = &Error{"update of meta page failed", nil}
    VersionMismatchError = &Error{"database environment version mismatch", nil}
    InvalidError = &Error{"file is not a bolt file", nil}
    MapFullError = &Error{"environment mapsize limit reached", nil}
    BucketFullError = &Error{"environment maxdbs limit reached", nil}
    ReadersFullError = &Error{"environment maxreaders limit reached", nil}
    TransactionFullError = &Error{"transaction has too many dirty pages - transaction too big", nil}
    CursorFullError = &Error{"internal error - cursor stack limit reached", nil}
    PageFullError = &Error{"internal error - page has no more space", nil}
    MapResizedError = &Error{"database contents grew beyond environment mapsize", nil}
    IncompatibleError = &Error{"operation and db incompatible, or db flags changed", nil}
    BadReaderSlotError = &Error{"invalid reuse of reader locktable slot", nil}
    BadTransactionError = &Error{"transaction cannot recover - it must be aborted", nil}
    BadValueSizeError = &Error{"too big key/data or key is empty", nil}
)

type Error struct {
    message string
    cause error
}

func (e *Error) Error() {
func (e *Error) Error() string {
    if e.cause != nil {
        return e.message + ": " + e.cause.Error()
    }
    return e.message
}
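Error now carries an optional cause and prefixes its message when rendering, which is how the "meta0 error"/"meta1 error" wrappers in db.mmap read. A standalone sketch of how such a wrapped error prints (the cause text is illustrative):

package main

import (
    "errors"
    "fmt"
)

// Error mirrors the two-field shape in error.go: a message plus an
// optional underlying cause.
type Error struct {
    message string
    cause   error
}

func (e *Error) Error() string {
    if e.cause != nil {
        return e.message + ": " + e.cause.Error()
    }
    return e.message
}

func main() {
    cause := errors.New("read /tmp/demo.db: permission denied")
    err := &Error{message: "meta0 error", cause: cause}
    fmt.Println(err) // meta0 error: read /tmp/demo.db: permission denied
}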
17 meta.go

@@ -1,7 +1,7 @@
package bolt

var (
    InvalidMetaPageError = &Error{"Invalid meta page"}
    InvalidMetaPageError = &Error{"Invalid meta page", nil}
)

// TODO: #define mm_psize mm_dbs[0].md_pad

@@ -25,16 +25,14 @@ var (
// void *md_relctx; /**< user-provided context for md_rel */
// } MDB_dbx;

const magic int32 = 0xBEEFC0DE


const magic uint32 = 0xC0DEC0DE
const version uint32 = 1

type meta struct {
    magic int32
    version int32
    mapsize int
    free bucket
    main bucket
    magic uint32
    version uint32
    free Bucket
    main Bucket
    pgno int
    txnid int
}

@@ -49,7 +47,6 @@ func (m *meta) validate() error {
    return nil
}


// Read the environment parameters of a DB environment before
// mapping it into memory.
// @param[in] env the environment handle
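meta.validate, whose body falls outside this hunk, only has to compare the stored magic number and version against the constants above. A hedged sketch of what such a check typically looks like, reusing the values shown in the diff (the error variables here are local stand-ins):

package main

import (
    "errors"
    "fmt"
)

const (
    magic   uint32 = 0xC0DEC0DE
    version uint32 = 1
)

var (
    errInvalid         = errors.New("file is not a bolt file")
    errVersionMismatch = errors.New("database environment version mismatch")
)

// meta is trimmed to the fields a validity check needs.
type meta struct {
    magic   uint32
    version uint32
}

// validate rejects pages whose magic or version does not match this build.
func (m *meta) validate() error {
    if m.magic != magic {
        return errInvalid
    }
    if m.version != version {
        return errVersionMismatch
    }
    return nil
}

func main() {
    fmt.Println((&meta{magic: magic, version: version}).validate()) // <nil>
    fmt.Println((&meta{magic: 0xBEEFC0DE, version: version}).validate())
}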
44 page.go

@@ -5,9 +5,11 @@ import (
)

const maxPageSize = 0x8000
const minKeyCount = 2

var _page page
const headerSize = unsafe.Offsetof(_page.ptr)

const pageHeaderSize = int(unsafe.Offsetof(_page.ptr))

const minPageKeys = 2
const fillThreshold = 250 // 25%

@@ -20,13 +22,15 @@ const (
    p_dirty = 0x10 /**< dirty page, also set for #P_SUBP pages */
    p_sub = 0x40
    p_keep = 0x8000 /**< leave this page alone during spill */

    p_invalid = ^pgno(0)
)

// maxCommitPages is the maximum number of pages to commit in one writev() call.
const maxCommitPages 64
const maxCommitPages = 64

/* max bytes to write in one call */
const maxWriteByteCount 0x80000000U // TODO: #define MAX_WRITE 0x80000000U >> (sizeof(ssize_t) == 4))
const maxWriteByteCount uint = 0x80000000 // TODO: #define MAX_WRITE 0x80000000U >> (sizeof(ssize_t) == 4))

// TODO:
// #if defined(IOV_MAX) && IOV_MAX < MDB_COMMIT_PAGES

@@ -42,12 +46,14 @@ const maxWriteByteCount 0x80000000U // TODO: #define MAX_WRITE 0x80000000U >>
// TODO: #define MDB_SPLIT_REPLACE MDB_APPENDDUP /**< newkey is not new */

type pgno uint64
type txnid uint64
type indx uint16

type page struct {
    id pgno
    flags int
    lower int
    upper int
    lower indx
    upper indx
    overflow int
    ptr int
}

@@ -60,8 +66,8 @@ type pageState struct {
// meta returns a pointer to the metadata section of the page.
func (p *page) meta() (*meta, error) {
    // Exit if page is not a meta page.
    if (p.flags & p_meta) != 0 {
        return InvalidMetaPageError
    if (p.flags & p_meta) == 0 {
        return nil, InvalidMetaPageError
    }

    // Cast the meta section and validate before returning.

@@ -72,12 +78,17 @@ func (p *page) meta() (*meta, error) {
    return m, nil
}


// initMeta initializes a page as a new meta page.
func (p *page) initMeta(pageSize int) {
    p.flags = p_meta
    m := (*meta)(unsafe.Pointer(&p.ptr))
    m.magic = magic
    m.version = version
    m.free.pad = uint32(pageSize)
    m.pgno = 1
    m.free.root = p_invalid
    m.main.root = p_invalid
}

// nodeCount returns the number of nodes on the page.
func (p *page) nodeCount() int {

@@ -86,10 +97,5 @@ func (p *page) nodeCount() int {

// remainingSize returns the number of bytes left in the page.
func (p *page) remainingSize() int {
    return p.header.upper - p.header.lower
}

// remainingSize returns the number of bytes left in the page.
func (p *page) remainingSize() int {
    return p.header.upper - p.header.lower
    return int(p.upper - p.lower)
}
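pageHeaderSize is taken from unsafe.Offsetof(_page.ptr), so everything before ptr is the fixed page header and ptr only marks where the payload (for instance the meta struct) begins. A compact sketch of that layout trick on a plain byte buffer — the p_meta value and page size here are assumptions:

package main

import (
    "fmt"
    "unsafe"
)

type pgno uint64
type indx uint16

// page mirrors the header layout in page.go; ptr is only a marker for
// where the page payload starts.
type page struct {
    id       pgno
    flags    int
    lower    indx
    upper    indx
    overflow int
    ptr      int
}

type meta struct {
    magic   uint32
    version uint32
}

var _page page

// Everything up to ptr is header; the payload begins at this offset.
const pageHeaderSize = int(unsafe.Offsetof(_page.ptr))

func main() {
    buf := make([]byte, 4096)
    p := (*page)(unsafe.Pointer(&buf[0]))
    p.flags = 0x08 // assumed flag value for a meta page

    // Overlay a meta struct on the payload area, the way initMeta does.
    m := (*meta)(unsafe.Pointer(&p.ptr))
    m.magic = 0xC0DEC0DE
    m.version = 1

    fmt.Printf("header size: %d, magic: %#x\n", pageHeaderSize, m.magic)
}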
@@ -1,5 +1,5 @@
package bolt

type reader struct {
    int transactionID
    txnid int
}
@@ -16,7 +16,7 @@ type Transaction interface {
type transaction struct {
    id int
    flags int
    db *db
    db *DB
    parent *transaction
    child *transaction
    nextPageNumber int

@@ -25,7 +25,7 @@ type transaction struct {
    dirtyList []int
    reader *reader
    // TODO: bucketxs []*bucketx
    buckets []*bucket
    buckets []*Bucket
    bucketFlags []int
    cursors []*cursor
    // Implicit from slices? TODO: MDB_dbi mt_numdbs;

@@ -38,7 +38,6 @@ type ntxn struct {
    pageState pageState /**< parent transaction's saved freestate */
}


func (t *transaction) allocPage(num int) *page {
    /*
    MDB_env *env = txn->mt_env;

@@ -390,7 +389,7 @@ func (t *transaction) Renew() error {
    return nil
}

func (t *transaction) DB() DB {
func (t *transaction) DB() *DB {
    return t.db
}

@@ -663,7 +662,7 @@ func (t *transaction) saveFreelist() error {
// @param[in] txn the transaction that's being committed
// @param[in] keep number of initial pages in dirty_list to keep dirty.
// @return 0 on success, non-zero on failure.
func (t *transaction) flush(keep bool) {
func (t *transaction) flush(keep bool) error {
    /*
    MDB_env *env = txn->mt_env;
    MDB_ID2L dl = txn->mt_u.dirty_list;

@@ -1272,10 +1271,10 @@ func (t *transaction) Cursor(b Bucket) (Cursor, error) {

    return MDB_SUCCESS;
    */
    return nil
    return nil, nil
}

func (t *transaction) Renew(c Cursor) error {
func (t *transaction) Renew1(c Cursor) error {
    /*
    if (txn == NULL || mc == NULL || mc->mc_dbi >= txn->mt_numdbs)
        return EINVAL;

@@ -1286,9 +1285,10 @@ func (t *transaction) Renew(c Cursor) error {
    mdb_cursor_init(mc, txn, mc->mc_dbi, mc->mc_xcursor);
    return MDB_SUCCESS;
    */
    return nil
}

func (t *transaction) Delete(b *bucket, key []byte, data []byte) error {
func (t *transaction) Delete(b *Bucket, key []byte, data []byte) error {
    /*
    MDB_cursor mc;
    MDB_xcursor mx;

@@ -1360,9 +1360,10 @@ func (t *transaction) Put(b Bucket, key []byte, data []byte, flags int) error {
    mdb_cursor_init(&mc, txn, dbi, &mx);
    return mdb_cursor_put(&mc, key, data, flags);
    */
    return nil
}

func (t *transaction) Bucket(name string, flags int) (Bucket, error) {
func (t *transaction) Bucket(name string, flags int) (*Bucket, error) {
    /*
    MDB_val key, data;
    MDB_dbi i;

@@ -1467,16 +1468,19 @@ func (t *transaction) Bucket(name string, flags int) (Bucket, error) {
}

func (t *transaction) Stat(b Bucket) *Stat {
    /*
    if (txn == NULL || arg == NULL || dbi >= txn->mt_numdbs)
        return EINVAL;

    if (txn->mt_dbflags[dbi] & DB_STALE) {
        MDB_cursor mc;
        MDB_xcursor mx;
        /* Stale, must read the DB's root. cursor_init does it for us. */
        // Stale, must read the DB's root. cursor_init does it for us.
        mdb_cursor_init(&mc, txn, dbi, &mx);
    }
    return mdb_stat0(txn->mt_env, &txn->mt_dbs[dbi], arg);
    */
    return nil
}

func (t *transaction) BucketFlags(b Bucket) (int, error) {

@@ -1490,7 +1494,7 @@ func (t *transaction) BucketFlags(b Bucket) (int, error) {
    return 0, nil
}

func (t *transaction) Drop(b Bucket int del) error {
func (t *transaction) Drop(b *Bucket, del int) error {
    /*
    MDB_cursor *mc, *m2;
    int rc;
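transaction.flush, once the C body is ported, will write the dirty page list back to the data file at offsets derived from the page ids. A rough standalone sketch of that write path, assuming WriteAt at id*pageSize — none of these names or sizes are final in this commit:

package main

import (
    "fmt"
    "os"
)

const pageSize = 4096 // assumed

// dirtyPage pairs a page id with the page-sized buffer to be written.
type dirtyPage struct {
    id  int
    buf [pageSize]byte
}

// flush writes each dirty page at its fixed offset, then syncs the file.
func flush(f *os.File, dirty []dirtyPage) error {
    for i := range dirty {
        off := int64(dirty[i].id) * pageSize
        if _, err := f.WriteAt(dirty[i].buf[:], off); err != nil {
            return err
        }
    }
    return f.Sync()
}

func main() {
    f, err := os.CreateTemp("", "bolt-flush-")
    if err != nil {
        panic(err)
    }
    defer os.Remove(f.Name())
    defer f.Close()

    fmt.Println("flush:", flush(f, []dirtyPage{{id: 2}, {id: 3}}))
}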