Rename: pgid pgid => pgId pgid to avoid confusion.

Signed-off-by: Piotr Tabor <ptab@google.com>
Piotr Tabor 2023-01-13 17:40:27 +01:00
parent f16e2522ce
commit 80edaf14f0
5 changed files with 34 additions and 34 deletions
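The rename addresses a readability problem in the old signatures: a parameter declared as `pgid pgid` shadows the package-level type `pgid` for the rest of the function body, so the same identifier means "this value" in some places and "the page-id type" in others. Below is a minimal, standalone sketch of that shadowing (not bbolt code; the type and function names are illustrative only):

```go
package main

import "fmt"

// pgid mirrors bbolt's page-id type (illustrative only).
type pgid uint64

// Before the rename: the parameter `pgid` shadows the type `pgid` inside the
// function body, so `pgid` can no longer be used as a type name there.
func pageOffsetOld(pgid pgid, pageSize int) uint64 {
	// var next pgid = pgid + 1 // would not compile: `pgid` now names the value, not the type
	return uint64(pgid) * uint64(pageSize)
}

// After the rename: `pgId` is the value, and `pgid` remains available as the type.
func pageOffsetNew(pgId pgid, pageSize int) uint64 {
	var next pgid = pgId + 1 // the type name is usable again
	_ = next
	return uint64(pgId) * uint64(pageSize)
}

func main() {
	fmt.Println(pageOffsetOld(3, 4096), pageOffsetNew(3, 4096))
}
```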

View File

@@ -513,8 +513,8 @@ func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
 	b._forEachPageNode(b.root, 0, fn)
 }
-func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
-	var p, n = b.pageNode(pgid)
+func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) {
+	var p, n = b.pageNode(pgId)
 	// Execute function.
 	fn(p, n, depth)
@@ -654,11 +654,11 @@ func (b *Bucket) rebalance() {
 }
 // node creates a node from a page and associates it with a given parent.
-func (b *Bucket) node(pgid pgid, parent *node) *node {
+func (b *Bucket) node(pgId pgid, parent *node) *node {
 	_assert(b.nodes != nil, "nodes map expected")
 	// Retrieve node if it's already been created.
-	if n := b.nodes[pgid]; n != nil {
+	if n := b.nodes[pgId]; n != nil {
 		return n
 	}
@@ -673,12 +673,12 @@ func (b *Bucket) node(pgid pgid, parent *node) *node {
 	// Use the inline page if this is an inline bucket.
 	var p = b.page
 	if p == nil {
-		p = b.tx.page(pgid)
+		p = b.tx.page(pgId)
 	}
 	// Read the page into the node and cache it.
 	n.read(p)
-	b.nodes[pgid] = n
+	b.nodes[pgId] = n
 	// Update statistics.
 	b.tx.stats.IncNodeCount(1)

View File

@@ -172,13 +172,13 @@ func (c *Cursor) goToFirstElementOnTheStack() {
 		}
 		// Keep adding pages pointing to the first element to the stack.
-		var pgid pgid
+		var pgId pgid
 		if ref.node != nil {
-			pgid = ref.node.inodes[ref.index].pgid
+			pgId = ref.node.inodes[ref.index].pgid
 		} else {
-			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+			pgId = ref.page.branchPageElement(uint16(ref.index)).pgid
 		}
-		p, n := c.bucket.pageNode(pgid)
+		p, n := c.bucket.pageNode(pgId)
 		c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
 	}
 }
@@ -193,13 +193,13 @@ func (c *Cursor) last() {
 		}
 		// Keep adding pages pointing to the last element in the stack.
-		var pgid pgid
+		var pgId pgid
 		if ref.node != nil {
-			pgid = ref.node.inodes[ref.index].pgid
+			pgId = ref.node.inodes[ref.index].pgid
 		} else {
-			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+			pgId = ref.page.branchPageElement(uint16(ref.index)).pgid
 		}
-		p, n := c.bucket.pageNode(pgid)
+		p, n := c.bucket.pageNode(pgId)
 		var nextRef = elemRef{page: p, node: n}
 		nextRef.index = nextRef.count() - 1
@@ -268,8 +268,8 @@ func (c *Cursor) prev() (key []byte, value []byte, flags uint32) {
 }
 // search recursively performs a binary search against a given page/node until it finds a given key.
-func (c *Cursor) search(key []byte, pgid pgid) {
-	p, n := c.bucket.pageNode(pgid)
+func (c *Cursor) search(key []byte, pgId pgid) {
+	p, n := c.bucket.pageNode(pgId)
 	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
 		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
 	}

View File

@@ -256,8 +256,8 @@ func (f *freelist) rollback(txid txid) {
 }
 // freed returns whether a given page is in the free list.
-func (f *freelist) freed(pgid pgid) bool {
-	_, ok := f.cache[pgid]
+func (f *freelist) freed(pgId pgid) bool {
+	_, ok := f.cache[pgId]
 	return ok
 }

View File

@@ -113,9 +113,9 @@ func (n *node) prevSibling() *node {
 }
 // put inserts a key/value.
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
-	if pgid >= n.bucket.tx.meta.pgid {
-		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
+func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) {
+	if pgId >= n.bucket.tx.meta.pgid {
+		panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid))
 	} else if len(oldKey) <= 0 {
 		panic("put: zero-length old key")
 	} else if len(newKey) <= 0 {
@@ -136,7 +136,7 @@ func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
 	inode.flags = flags
 	inode.key = newKey
 	inode.value = value
-	inode.pgid = pgid
+	inode.pgid = pgId
 	_assert(len(inode.key) > 0, "put: zero-length inode key")
 }

View File

@@ -104,8 +104,8 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo
 // key order constraints:
 // - keys on pages must be sorted
 // - keys on children pages are between 2 consecutive keys on the parent's branch page).
-func (tx *Tx) recursivelyCheckPages(pgid pgid, keyToString func([]byte) string, ch chan error) {
-	tx.recursivelyCheckPagesInternal(pgid, nil, nil, nil, keyToString, ch)
+func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) {
+	tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch)
 }
 // recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are:
@@ -114,11 +114,11 @@ func (tx *Tx) recursivelyCheckPages(pgid pgid, keyToString func([]byte) string,
 // - Are in right ordering relationship to their parents.
 // `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message.
 func (tx *Tx) recursivelyCheckPagesInternal(
-	pgid pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid,
+	pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid,
 	keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) {
-	p := tx.page(pgid)
-	pagesStack = append(pagesStack, pgid)
+	p := tx.page(pgId)
+	pagesStack = append(pagesStack, pgId)
 	switch {
 	case p.flags&branchPageFlag != 0:
 		// For branch page we navigate ranges of all subpages.
@@ -128,13 +128,13 @@ func (tx *Tx) recursivelyCheckPagesInternal(
 			if i == 0 && runningMin != nil && compareKeys(runningMin, elem.key()) > 0 {
 				ch <- fmt.Errorf("key (%d, %s) on the branch page(%d) needs to be >="+
 					" to the key(%s) in the ancestor. Pages stack: %v",
-					i, keyToString(elem.key()), pgid, keyToString(runningMin), pagesStack)
+					i, keyToString(elem.key()), pgId, keyToString(runningMin), pagesStack)
 			}
 			if maxKeyOpen != nil && compareKeys(elem.key(), maxKeyOpen) >= 0 {
 				ch <- fmt.Errorf("key (%d: %s) on the branch page(%d) needs to be <"+
 					" than key of the next element reachable from the ancestor (%v). Pages stack: %v",
-					i, keyToString(elem.key()), pgid, keyToString(maxKeyOpen), pagesStack)
+					i, keyToString(elem.key()), pgId, keyToString(maxKeyOpen), pagesStack)
 			}
 			var maxKey []byte
@@ -153,22 +153,22 @@ func (tx *Tx) recursivelyCheckPagesInternal(
 			elem := p.leafPageElement(uint16(i))
 			if i == 0 && runningMin != nil && compareKeys(runningMin, elem.key()) > 0 {
 				ch <- fmt.Errorf("The first key[%d]=(hex)%s on leaf page(%d) needs to be >= the key in the ancestor (%s). Stack: %v",
-					i, keyToString(elem.key()), pgid, keyToString(runningMin), pagesStack)
+					i, keyToString(elem.key()), pgId, keyToString(runningMin), pagesStack)
 			}
 			if i > 0 {
 				cmpRet := compareKeys(runningMin, elem.key())
 				if cmpRet > 0 {
 					ch <- fmt.Errorf("key[%d]=(hex)%s on leaf page(%d) needs to be > (found <) than previous element (hex)%s. Stack: %v",
-						i, keyToString(elem.key()), pgid, keyToString(runningMin), pagesStack)
+						i, keyToString(elem.key()), pgId, keyToString(runningMin), pagesStack)
 				}
 				if cmpRet == 0 {
 					ch <- fmt.Errorf("key[%d]=(hex)%s on leaf page(%d) needs to be > (found =) than previous element (hex)%s. Stack: %v",
-						i, keyToString(elem.key()), pgid, keyToString(runningMin), pagesStack)
+						i, keyToString(elem.key()), pgId, keyToString(runningMin), pagesStack)
 				}
 			}
 			if maxKeyOpen != nil && compareKeys(elem.key(), maxKeyOpen) >= 0 {
 				ch <- fmt.Errorf("key[%d]=(hex)%s on leaf page(%d) needs to be < than key of the next element in ancestor (hex)%s. Pages stack: %v",
-					i, keyToString(elem.key()), pgid, keyToString(maxKeyOpen), pagesStack)
+					i, keyToString(elem.key()), pgId, keyToString(maxKeyOpen), pagesStack)
 			}
 			runningMin = elem.key()
 		}
@@ -176,7 +176,7 @@ func (tx *Tx) recursivelyCheckPagesInternal(
 			return p.leafPageElement(p.count - 1).key()
 		}
 	default:
-		ch <- fmt.Errorf("unexpected page type for pgid:%d", pgid)
+		ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId)
 	}
 	return maxKeyInSubtree
 }
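The tx-check hunks above touch code that enforces two B-tree invariants: keys on each page must be strictly ascending, and every key in a child subtree must fall in the half-open range [minKeyClosed, maxKeyOpen) implied by the parent's branch elements. Below is a minimal, standalone sketch of that per-page range check, assuming plain byte-slice keys; the helper name `checkSorted` is illustrative and not part of bbolt's API:

```go
package main

import (
	"bytes"
	"fmt"
)

// checkSorted reports keys that violate the ordering and range constraints
// analogous to those verified by recursivelyCheckPagesInternal: keys must be
// strictly ascending, >= minKeyClosed (if non-nil) and < maxKeyOpen (if non-nil).
func checkSorted(keys [][]byte, minKeyClosed, maxKeyOpen []byte) []error {
	var errs []error
	for i, k := range keys {
		if i == 0 && minKeyClosed != nil && bytes.Compare(minKeyClosed, k) > 0 {
			errs = append(errs, fmt.Errorf("key[%d]=%q below the ancestor's lower bound %q", i, k, minKeyClosed))
		}
		if i > 0 && bytes.Compare(keys[i-1], k) >= 0 {
			errs = append(errs, fmt.Errorf("key[%d]=%q not strictly greater than previous key %q", i, k, keys[i-1]))
		}
		if maxKeyOpen != nil && bytes.Compare(k, maxKeyOpen) >= 0 {
			errs = append(errs, fmt.Errorf("key[%d]=%q not below the ancestor's upper bound %q", i, k, maxKeyOpen))
		}
	}
	return errs
}

func main() {
	keys := [][]byte{[]byte("a"), []byte("c"), []byte("b")}
	for _, err := range checkSorted(keys, []byte("a"), []byte("z")) {
		fmt.Println(err)
	}
}
```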