Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Db namespaces #2090

Closed
wants to merge 12 commits into the base branch from the author's branch (branch names lost in page extraction)
4 changes: 2 additions & 2 deletions nimbus/db/ledger/distinct_ledgers.nim
Expand Up @@ -47,11 +47,11 @@ proc toSvp*(sl: StorageLedger): seq[(UInt256,UInt256)] =
save = db.trackNewApi
db.trackNewApi = false
defer: db.trackNewApi = save
let kvt = db.newKvt

var kvp: Table[UInt256,UInt256]
try:
for (slotHash,val) in sl.distinctBase.toMpt.pairs:
let rc = kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
let rc = db.newKvt(slotHashToSlot).get(slotHash)
if rc.isErr:
warn "StorageLedger.dump()", slotHash, error=($$rc.error)
else:
Expand Down
20 changes: 17 additions & 3 deletions nimbus/db/storage_types.nim
Expand Up @@ -12,7 +12,11 @@ import
eth/common

type
DBKeyKind* = enum
DbNamespace* = enum
# Start at -1 because the existing code and tests require this.
# Don't change the order or ord values of the below enums or the
# tests will break.
default = -1
genericHash
blockNumberToHash
blockHashToScore
Expand Down Expand Up @@ -102,8 +106,8 @@ proc skeletonBlockHashToNumberKey*(h: Hash256): DbKey {.inline.} =
result.dataEndPos = uint8 32

proc skeletonHeaderKey*(u: BlockNumber): DbKey {.inline.} =
  ## Build the database key for a skeleton header indexed by block number:
  ## one tag byte (`skeletonHeader`) followed by the raw bytes of `u`.
  # Guard before writing: the key buffer holds the tag plus at most 32 bytes.
  doAssert sizeof(u) <= 32
  # NOTE(review): the rendered diff duplicated this assignment (it appeared
  # both before and after the doAssert); a single write is sufficient.
  result.data[0] = byte ord(skeletonHeader)
  # Native in-memory byte order of the block number — assumes callers only
  # ever read the key back through the same copyMem path.
  copyMem(addr result.data[1], unsafeAddr u, sizeof(u))
  result.dataEndPos = uint8 sizeof(u)

Expand Down Expand Up @@ -135,9 +139,19 @@ proc blockHashToBlockWitnessKey*(h: Hash256): DbKey {.inline.} =
result.data[1 .. 32] = h.data
result.dataEndPos = uint8 32

proc toDbKey*(key: openArray[byte], namespace: DbNamespace): DbKey {.inline.} =
  ## Wrap an arbitrary raw `key` (up to 32 bytes) into a `DbKey` prefixed
  ## with the `namespace` tag byte.
  ##
  ## Fix: the original `result.data[1 .. 32] = key` is an array slice
  ## assignment, which requires `key.len == 32` and raises a RangeDefect for
  ## shorter keys even though `dataEndPos` was set from `key.len`. Copy
  ## exactly `key.len` bytes instead so keys of any length up to 32 work.
  doAssert key.len <= 32
  # NOTE(review): `byte ord(namespace)` will range-error for `default` if the
  # enum starts at -1 — confirm callers never pass `default` here.
  result.data[0] = byte ord(namespace)
  if key.len > 0:
    copyMem(addr result.data[1], unsafeAddr key[0], key.len)
  # Inclusive end index used by toOpenArray: tag byte + key bytes.
  result.dataEndPos = uint8(key.len)

## View of the used portion of a key's backing buffer, including the leading
## tag byte. `dataEndPos` is an inclusive end index, so the span is
## `dataEndPos + 1` bytes long.
template toOpenArray*(k: DbKey): openArray[byte] =
  k.data.toOpenArray(0, int(k.dataEndPos))

## Serialize a block number into its raw in-memory bytes.
## NOTE(review): despite the declared `openArray[byte]` return, the template
## expands to a freshly allocated `seq[byte]` (implicitly convertible at the
## call site) — the name suggests a zero-copy view, which this is not.
template toOpenArray*(u: BlockNumber): openArray[byte] =
  # Buffer cap elsewhere in this module is 32 bytes — keep numbers within it.
  doAssert sizeof(u) <= 32
  var data = newSeq[byte](sizeof(u))
  # Native byte order; presumably matches skeletonHeaderKey's encoding —
  # TODO confirm both sides agree on endianness.
  copyMem(addr data[0], unsafeAddr u, sizeof(u))
  data

proc `==`*(a, b: DbKey): bool {.inline.} =
  ## Two keys are equal when their used byte spans (tag byte included) match.
  ## Bytes past `dataEndPos` are deliberately ignored.
  # Cheap length check first; openArray `==` would reject unequal lengths
  # anyway, so short-circuiting here preserves the original result exactly.
  if a.dataEndPos != b.dataEndPos:
    return false
  a.toOpenArray == b.toOpenArray

2 changes: 1 addition & 1 deletion premix/prestate.nim
@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2020-2023 Status Research & Development GmbH
# Copyright (c) 2020-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
Expand Down
15 changes: 8 additions & 7 deletions tests/test_sync_snap/test_syncdb.nim
Expand Up @@ -23,15 +23,15 @@ import
./test_helpers

type
UndumpDBKeySubType* = array[DBKeyKind.high.ord+2,int]
UndumpDBKeySubType* = array[DbNamespace.high.ord+2, int]

proc pp*(a: UndumpDBKeySubType): string

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc pp(a: ((int,int),UndumpDBKeySubType, UndumpDBKeySubType)): string =
  ## Render the triple as "([i,j],<left>,<right>)" using the sibling
  ## `pp` overload for the two count arrays.
  let (pair, left, right) = a
  result = "(["
  result.add $pair[0]
  result.add ","
  result.add $pair[1]
  result.add "],"
  result.add left.pp
  result.add ","
  result.add right.pp
  result.add ")"


Expand Down Expand Up @@ -145,7 +145,7 @@ proc test_syncdbImportSnapshot*(
result[0][1].inc
of UndumpKey33:
key = w.key33.toSeq
let inx = min(w.key33[0], DBKeyKind.high.ord+1)
let inx = min(w.key33[0], DbNamespace.high.ord + 1)

#if inx == contractHash.ord:
# let digest = w.data.keccakHash.data.toSeq
Expand All @@ -161,7 +161,7 @@ proc test_syncdbImportSnapshot*(
result[1][inx].inc
of UndumpOther:
key = w.other
let inx = min(w.other[0], DBKeyKind.high.ord+1)
let inx = min(w.other[0], DbNamespace.high.ord+1)
result[2][inx].inc

count.inc
Expand All @@ -187,7 +187,8 @@ proc test_syncdbAppendBlocks*(
let
blkLen = 33
lastBlock = pivotBlock + max(1,nItemsMax).uint64
kvt = chn.com.db.kvt.backend.toLegacy
db = chn.com.db
#kvt = chn.com.db.defaultKvt.backend.toLegacy

# Join (headers,blocks) pair in the range pivotBlock..lastBlock
q = toSeq(filePath.undumpBlocks(pivotBlock,lastBlock)).pairJoin
Expand All @@ -196,8 +197,8 @@ proc test_syncdbAppendBlocks*(
pivNum = q[0][0].blockNumber

# Verify pivot
check 0 < kvt.get(pivHash.toBlockHeaderKey.toOpenArray).len
check pivHash == kvt.get(pivNum.toBlockNumberKey.toOpenArray).decode(Hash256)
check 0 < db.kvt.get(pivHash.toBlockHeaderKey.toOpenArray).len
check pivHash == db.kvt.get(pivNum.toBlockNumberKey.toOpenArray).decode(Hash256)

# Set up genesis deputy.
chn.com.startOfHistory = pivHash
Expand Down