diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml index 3eaf3ec59a..8f7974d4c5 100644 --- a/.github/workflows/kurtosis.yml +++ b/.github/workflows/kurtosis.yml @@ -83,7 +83,7 @@ jobs: cat kurtosis-network-params.yml | envsubst > assertoor.yaml sed -i "s/el_image: .*/el_image: localtestnet/" assertoor.yaml - kurtosis run github.com/ethpandaops/ethereum-package@4.3.0 --enclave assertoor-${{ github.run_id }} --args-file assertoor.yaml + kurtosis run github.com/ethpandaops/ethereum-package --enclave assertoor-${{ github.run_id }} --args-file assertoor.yaml enclave_dump=$(kurtosis enclave inspect assertoor-${{ github.run_id }}) diff --git a/.github/workflows/simulators.yml b/.github/workflows/simulators.yml index d9f2fe440b..3cd8e96a89 100644 --- a/.github/workflows/simulators.yml +++ b/.github/workflows/simulators.yml @@ -43,7 +43,7 @@ jobs: run: | ncpu=$(nproc) make -j${ncpu} ARCH_OVERRIDE=x64 CI_CACHE=NimBinaries update-from-ci - make -j${ncpu} deps + make -j${ncpu} deps rocksdb - name: Run Simulators run: | @@ -84,7 +84,7 @@ jobs: run: | ncpu=$(sysctl -n hw.ncpu) make -j${ncpu} ARCH_OVERRIDE=x64 CI_CACHE=NimBinaries update-from-ci - make -j${ncpu} deps + make -j${ncpu} deps rocksdb - name: Run Simulators run: | diff --git a/Makefile b/Makefile index 6ca25bd4ca..da60ef7b6f 100644 --- a/Makefile +++ b/Makefile @@ -105,6 +105,7 @@ VERIF_PROXY_OUT_PATH ?= build/libverifproxy/ deps \ update \ nimbus \ + nimbus_execution_client \ fluffy \ nimbus_verified_proxy \ libverifproxy \ @@ -157,7 +158,7 @@ GIT_SUBMODULE_UPDATE := git -c submodule."vendor/nimbus-eth2".update=none submod else # "variables.mk" was included. Business as usual until the end of this file. # default target, because it's the first one that doesn't start with '.' -all: | $(TOOLS) nimbus +all: | $(TOOLS) nimbus_execution_client # must be included after the default target -include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk @@ -210,11 +211,12 @@ $(TOOLS): | build deps rocksdb echo -e $(BUILD_MSG) "build/$@" && \ $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:chronicles_log_level=TRACE -o:build/$@ "$${TOOL_DIR}/$@.nim" -# a phony target, because teaching `make` how to do conditional recompilation of Nim projects is too complicated -execution_client_name = nimbus_execution_client -nimbus: | build deps rocksdb - echo -e $(BUILD_MSG) "build/$(execution_client_name)" && \ - $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:chronicles_log_level=TRACE -o:build/$(execution_client_name) "nimbus/$(execution_client_name).nim" +nimbus_execution_client: | build deps rocksdb + echo -e $(BUILD_MSG) "build/nimbus_execution_client" && \ + $(ENV_SCRIPT) nim c $(NIM_PARAMS) -d:chronicles_log_level=TRACE -o:build/nimbus_execution_client "nimbus/nimbus_execution_client.nim" + +nimbus: nimbus_execution_client + echo "The nimbus target is deprecated and will soon change meaning, use 'nimbus_execution_client' instead" # symlink nimbus.nims: @@ -244,7 +246,7 @@ test: | build deps rocksdb $(ENV_SCRIPT) nim test_rocksdb $(NIM_PARAMS) nimbus.nims $(ENV_SCRIPT) nim test $(NIM_PARAMS) nimbus.nims -test_import: nimbus +test_import: nimbus_execution_client $(ENV_SCRIPT) nim test_import $(NIM_PARAMS) nimbus.nims # builds and runs an EVM-related subset of the nimbus test suite @@ -257,10 +259,10 @@ test-evm: | build deps rocksdb # deterministic order for debugging info sections - even with # "-frandom-seed=...". Striping the binaries should make them identical, though. test-reproducibility: - + [ -e build/nimbus_execution_client ] || $(MAKE) V=0 nimbus; \ + + [ -e build/nimbus_execution_client ] || $(MAKE) V=0 nimbus_execution_client; \ MD5SUM1=$$($(MD5SUM) build/nimbus_execution_client | cut -d ' ' -f 1) && \ rm -rf nimcache/*/nimbus_execution_client && \ - $(MAKE) V=0 nimbus && \ + $(MAKE) V=0 nimbus_execution_client && \ MD5SUM2=$$($(MD5SUM) build/nimbus_execution_client | cut -d ' ' -f 1) && \ [ "$$MD5SUM1" = "$$MD5SUM2" ] && echo -e "\e[92mSuccess: identical binaries.\e[39m" || \ { echo -e "\e[91mFailure: the binary changed between builds.\e[39m"; exit 1; } @@ -320,17 +322,17 @@ utp-test: | build deps # Nimbus Verified Proxy related targets # Builds the nimbus_verified_proxy -nimbus_verified_proxy: | build deps +nimbus_verified_proxy: | build deps rocksdb echo -e $(BUILD_MSG) "build/$@" && \ $(ENV_SCRIPT) nim nimbus_verified_proxy $(NIM_PARAMS) nimbus.nims # builds and runs the nimbus_verified_proxy test suite -nimbus-verified-proxy-test: | build deps +nimbus-verified-proxy-test: | build deps rocksdb $(ENV_SCRIPT) nim nimbus_verified_proxy_test $(NIM_PARAMS) nimbus.nims # Shared library for verified proxy -libverifproxy: | build deps +libverifproxy: | build deps rocksdb + echo -e $(BUILD_MSG) "build/$@" && \ $(ENV_SCRIPT) nim --version && \ $(ENV_SCRIPT) nim c --app:lib -d:"libp2p_pki_schemes=secp256k1" --noMain:on --threads:on --nimcache:nimcache/libverifproxy -o:$(VERIF_PROXY_OUT_PATH)/$@.$(VERIF_PROXY_SHAREDLIBEXT) $(NIM_PARAMS) nimbus_verified_proxy/libverifproxy/verifproxy.nim @@ -359,7 +361,7 @@ txparse: | build deps # usual cleaning clean: | clean-common - rm -rf build/{nimbus,fluffy,libverifproxy,nimbus_verified_proxy,$(TOOLS_CSV),$(FLUFFY_TOOLS_CSV),all_tests,test_kvstore_rocksdb,test_rpc,all_fluffy_tests,all_history_network_custom_chain_tests,test_portal_testnet,utp_test_app,utp_test,*.dSYM} + rm -rf build/{nimbus,nimbus_execution_client,fluffy,libverifproxy,nimbus_verified_proxy,$(TOOLS_CSV),$(FLUFFY_TOOLS_CSV),all_tests,test_kvstore_rocksdb,test_rpc,all_fluffy_tests,all_history_network_custom_chain_tests,test_portal_testnet,utp_test_app,utp_test,*.dSYM} rm -rf tools/t8n/{t8n,t8n_test} rm -rf tools/evmstate/{evmstate,evmstate_test} ifneq ($(USE_LIBBACKTRACE), 0) diff --git a/TracerTests.md b/TracerTests.md index c9f85e471e..e02b5c28bb 100644 --- a/TracerTests.md +++ b/TracerTests.md @@ -2,16 +2,16 @@ TracerTests === ## TracerTests ```diff -+ block46147.json OK -+ block46400.json OK -+ block46402.json OK -+ block47205.json OK -+ block48712.json OK -+ block48915.json OK -+ block49018.json OK -+ block97.json OK + block46147.json Skip + block46400.json Skip + block46402.json Skip + block47205.json Skip + block48712.json Skip + block48915.json Skip + block49018.json Skip + block97.json Skip ``` -OK: 8/8 Fail: 0/8 Skip: 0/8 +OK: 0/8 Fail: 0/8 Skip: 8/8 ---TOTAL--- -OK: 8/8 Fail: 0/8 Skip: 0/8 +OK: 0/8 Fail: 0/8 Skip: 8/8 diff --git a/fluffy/common/common_types.nim b/fluffy/common/common_types.nim index 3d67b64728..2f0043154f 100644 --- a/fluffy/common/common_types.nim +++ b/fluffy/common/common_types.nim @@ -36,6 +36,12 @@ func decodeRlp*(input: openArray[byte], T: type): Result[T, string] = except RlpError as e: err(e.msg) +func decodeRlpOrRaise*(input: openArray[byte], T: type): T = + try: + rlp.decode(input, T) + except RlpError as e: + raiseAssert(e.msg) + func decodeSsz*(input: openArray[byte], T: type): Result[T, string] = try: ok(SSZ.decode(input, T)) diff --git a/fluffy/conf.nim b/fluffy/conf.nim index 3e876b3d65..247dfcce16 100644 --- a/fluffy/conf.nim +++ b/fluffy/conf.nim @@ -99,7 +99,8 @@ type portalSubnetworks* {. desc: "Select which networks (Portal sub-protocols) to enable", - defaultValue: {PortalSubnetwork.history, PortalSubnetwork.state}, + defaultValue: + {PortalSubnetwork.history, PortalSubnetwork.state, PortalSubnetwork.beacon}, name: "portal-subnetworks" .}: set[PortalSubnetwork] @@ -258,7 +259,14 @@ type desc: "The maximum number of nodes to send content to during gossip", defaultValue: defaultPortalProtocolConfig.maxGossipNodes, defaultValueDesc: $defaultMaxGossipNodesDesc, - name: "max-gossip-nodes" + name: "debug-max-gossip-nodes" + .}: int + + maxConcurrentOffers* {. + hidden, + desc: "The maximum number of offers to send concurrently", + defaultValue: defaultPortalProtocolConfig.maxConcurrentOffers, + name: "debug-max-concurrent-offers" .}: int radiusConfig* {. @@ -315,14 +323,14 @@ type "Size of the in memory local content cache. This is the max number " & "of content values that can be stored in the cache.", defaultValue: defaultPortalProtocolConfig.contentCacheSize, - name: "content-cache-size" + name: "debug-content-cache-size" .}: int disableContentCache* {. hidden, desc: "Disable the in memory local content cache", defaultValue: defaultPortalProtocolConfig.disableContentCache, - name: "disable-content-cache" + name: "debug-disable-content-cache" .}: bool disablePoke* {. diff --git a/fluffy/database/content_db.nim b/fluffy/database/content_db.nim index 7a80e3f5c7..fd9b0bd5e3 100644 --- a/fluffy/database/content_db.nim +++ b/fluffy/database/content_db.nim @@ -285,45 +285,21 @@ proc close*(db: ContentDB) = db.largestDistanceStmt.disposeSafe() discard db.kv.close() -## Private KvStoreRef Calls - -proc get(kv: KvStoreRef, key: openArray[byte]): Opt[seq[byte]] = - var res: Opt[seq[byte]] - proc onData(data: openArray[byte]) = - res = Opt.some(@data) - - discard kv.get(key, onData).expectDb() - - return res - -proc getSszDecoded(kv: KvStoreRef, key: openArray[byte], T: type auto): Opt[T] = - let res = kv.get(key) - if res.isSome(): - try: - Opt.some(SSZ.decode(res.get(), T)) - except SerializationError: - raiseAssert("Stored data should always be serialized correctly") - else: - Opt.none(T) - ## Private ContentDB calls -proc get(db: ContentDB, key: openArray[byte]): Opt[seq[byte]] = - db.kv.get(key) +template get(db: ContentDB, key: openArray[byte], onData: DataProc): bool = + db.kv.get(key, onData).expectDb() -proc put(db: ContentDB, key, value: openArray[byte]) = +template put(db: ContentDB, key, value: openArray[byte]) = db.kv.put(key, value).expectDb() -proc contains(db: ContentDB, key: openArray[byte]): bool = +template contains(db: ContentDB, key: openArray[byte]): bool = db.kv.contains(key).expectDb() -proc del(db: ContentDB, key: openArray[byte]) = +template del(db: ContentDB, key: openArray[byte]) = # TODO: Do we want to return the bool here too? discard db.kv.del(key).expectDb() -proc getSszDecoded(db: ContentDB, key: openArray[byte], T: type auto): Opt[T] = - db.kv.getSszDecoded(key, T) - ## Public ContentId based ContentDB calls # TODO: Could also decide to use the ContentKey SSZ bytestring, as this is what @@ -334,9 +310,9 @@ proc getSszDecoded(db: ContentDB, key: openArray[byte], T: type auto): Opt[T] = # checked with the Radius/distance of the node anyhow. So lets see how we end up # using this mostly in the code. -proc get*(db: ContentDB, key: ContentId): Opt[seq[byte]] = +proc get*(db: ContentDB, key: ContentId, onData: DataProc): bool = # TODO: Here it is unfortunate that ContentId is a uint256 instead of Digest256. - db.get(key.toBytesBE()) + db.get(key.toBytesBE(), onData) proc put*(db: ContentDB, key: ContentId, value: openArray[byte]) = db.put(key.toBytesBE(), value) @@ -347,9 +323,6 @@ proc contains*(db: ContentDB, key: ContentId): bool = proc del*(db: ContentDB, key: ContentId) = db.del(key.toBytesBE()) -proc getSszDecoded*(db: ContentDB, key: ContentId, T: type auto): Opt[T] = - db.getSszDecoded(key.toBytesBE(), T) - ## Pruning related calls proc deleteContentFraction*( @@ -484,10 +457,15 @@ proc adjustRadius( proc createGetHandler*(db: ContentDB): DbGetHandler = return ( proc(contentKey: ContentKeyByteList, contentId: ContentId): Opt[seq[byte]] = - let content = db.get(contentId).valueOr: - return Opt.none(seq[byte]) + var res: seq[byte] + + proc onData(data: openArray[byte]) = + res = @data - ok(content) + if db.get(contentId, onData): + Opt.some(res) + else: + Opt.none(seq[byte]) ) proc createStoreHandler*(db: ContentDB, cfg: RadiusConfig): DbStoreHandler = @@ -520,6 +498,12 @@ proc createStoreHandler*(db: ContentDB, cfg: RadiusConfig): DbStoreHandler = db.put(contentId, content) ) +proc createContainsHandler*(db: ContentDB): DbContainsHandler = + return ( + proc(contentKey: ContentKeyByteList, contentId: ContentId): bool = + db.contains(contentId) + ) + proc createRadiusHandler*(db: ContentDB): DbRadiusHandler = return ( proc(): UInt256 {.raises: [], gcsafe.} = diff --git a/fluffy/database/content_db_custom_sql_functions.nim b/fluffy/database/content_db_custom_sql_functions.nim index 21fb2eefd4..406f49c361 100644 --- a/fluffy/database/content_db_custom_sql_functions.nim +++ b/fluffy/database/content_db_custom_sql_functions.nim @@ -13,7 +13,7 @@ func xorDistance(a: openArray[byte], b: openArray[byte]): seq[byte] = doAssert(a.len == b.len) let length = a.len - var distance: seq[byte] = newSeq[byte](length) + var distance: seq[byte] = newSeqUninitialized[byte](length) for i in 0 ..< length: distance[i] = a[i] xor b[i] diff --git a/fluffy/docs/the_fluffy_book/docs/index.md b/fluffy/docs/the_fluffy_book/docs/index.md index 41d74b3a6f..031bb1d3ad 100644 --- a/fluffy/docs/the_fluffy_book/docs/index.md +++ b/fluffy/docs/the_fluffy_book/docs/index.md @@ -18,11 +18,39 @@ To quickly get your Fluffy node up and running, follow the quickstart page: - [Quickstart for Windows users](./quick-start-windows.md) - [Quickstart for Docker users](./quick-start-docker.md) -# Development status -The Portal Network is a project still in research phase. -This client is thus still experimental. +## Development status +The Portal Network is a project still in research phase. This client is thus still experimental. -However, the Portal history, beacon and state sub-networks are already operational and can be tested on the public network or in a local testnet. +The development of this client is on par with the latest Portal specifications and will continue to evolve according to the Portal specifications. + +The Portal history, beacon and state sub-networks are already operational on the public Portal mainnet. + +Fluffy is default ran on the [Portal mainnet](https://github.com/ethereum/portal-network-specs/blob/master/bootnodes.md#bootnodes-mainnet) but can also be run on a (local) testnet. + +### Supported sub-networks and content: + +- [History network](https://github.com/ethereum/portal-network-specs/blob/e8e428c55f34893becfe936fe323608e9937956e/history/history-network.md): headers, blocks, and receipts. + - Note: Canonical verification is currently only enabled for pre-merge blocks. +- [State network](https://github.com/ethereum/portal-network-specs/blob/e8e428c55f34893becfe936fe323608e9937956e/state/state-network.md): accounts and contract storage. + - Note: The Portal mainnet does not yet hold all states, nor the recent state. +- [Beacon network](https://github.com/ethereum/portal-network-specs/blob/e8e428c55f34893becfe936fe323608e9937956e/beacon-chain/beacon-network.md): consensus light client data and historical summaries. + +### Supported functionality: + +- [Portal JSON-RPC API](https://github.com/ethereum/portal-network-specs/tree/e8e428c55f34893becfe936fe323608e9937956e/jsonrpc) +- [Consensus light client sync](https://github.com/ethereum/consensus-specs/blob/a09d0c321550c5411557674a981e2b444a1178c0/specs/altair/light-client/light-client.md) through content available on the Portal beacon network. +- Partial support of [Execution JSON-RPC API](https://github.com/ethereum/execution-apis): + - web3_clientVersion + - eth_chainId + - eth_getBalance + - eth_getBlockByHash + - eth_getBlockByNumber + - eth_getBlockTransactionCountByHash + - eth_getCode + - eth_getLogs (partial support: queries by blockhash) + - eth_getProof + - eth_getStorageAt + - eth_getTransactionCount ## Get in touch diff --git a/fluffy/eth_data/era1.nim b/fluffy/eth_data/era1.nim index 060fe746e5..0469bbeedc 100644 --- a/fluffy/eth_data/era1.nim +++ b/fluffy/eth_data/era1.nim @@ -13,7 +13,7 @@ import stew/[endians2, io2, byteutils, arrayops], stint, snappy, - eth/common/[headers, blocks_rlp, receipts_rlp], + eth/common/[headers_rlp, blocks_rlp, receipts_rlp], beacon_chain/spec/beacon_time, ssz_serialization, ncli/e2store, @@ -184,7 +184,7 @@ proc fromCompressedRlpBytes(bytes: openArray[byte], T: type): Result[T, string] try: ok(rlp.decode(decodeFramed(bytes, checkIntegrity = false), T)) except RlpError as e: - err("Invalid Compressed RLP data" & e.msg) + err("Invalid compressed RLP data for " & $T & ": " & e.msg) proc init*(T: type Era1Group, f: IoHandle, startNumber: uint64): Result[T, string] = discard ?f.appendHeader(E2Version, 0) @@ -498,7 +498,7 @@ iterator era1BlockHeaders*(f: Era1File): headers.Header = for blockNumber in startNumber .. endNumber: let header = f.getBlockHeader(blockNumber).valueOr: - raiseAssert("Failed to read block header") + raiseAssert("Failed to read block header: " & error) yield header iterator era1BlockTuples*(f: Era1File): BlockTuple = @@ -508,5 +508,5 @@ iterator era1BlockTuples*(f: Era1File): BlockTuple = for blockNumber in startNumber .. endNumber: let blockTuple = f.getBlockTuple(blockNumber).valueOr: - raiseAssert("Failed to read block header") + raiseAssert("Failed to read block tuple: " & error) yield blockTuple diff --git a/fluffy/fluffy.nim b/fluffy/fluffy.nim index 025badac38..a21cf5e43d 100644 --- a/fluffy/fluffy.nim +++ b/fluffy/fluffy.nim @@ -28,7 +28,7 @@ import ./rpc/[ rpc_eth_api, rpc_debug_api, rpc_discovery_api, rpc_portal_common_api, rpc_portal_history_api, rpc_portal_beacon_api, rpc_portal_state_api, - rpc_portal_debug_history_api, + rpc_portal_nimbus_beacon_api, rpc_portal_debug_history_api, ], ./database/content_db, ./portal_node, @@ -183,7 +183,7 @@ proc run( portalProtocolConfig = PortalProtocolConfig.init( config.tableIpLimit, config.bucketIpLimit, config.bitsPerHop, config.radiusConfig, config.disablePoke, config.maxGossipNodes, config.contentCacheSize, - config.disableContentCache, + config.disableContentCache, config.maxConcurrentOffers, ) portalNodeConfig = PortalNodeConfig( @@ -273,6 +273,7 @@ proc run( rpcServer.installPortalBeaconApiHandlers( node.beaconNetwork.value.portalProtocol ) + rpcServer.installPortalNimbusBeaconApiHandlers(node.beaconNetwork.value) if node.stateNetwork.isSome(): rpcServer.installPortalCommonApiHandlers( node.stateNetwork.value.portalProtocol, PortalSubnetwork.state diff --git a/fluffy/network/beacon/beacon_db.nim b/fluffy/network/beacon/beacon_db.nim index 6481ed6052..c70c7a1a5a 100644 --- a/fluffy/network/beacon/beacon_db.nim +++ b/fluffy/network/beacon/beacon_db.nim @@ -442,68 +442,72 @@ func keepBootstrapsFrom*(db: BeaconDb, minSlot: Slot) = let res = db.bootstraps.keepFromStmt.exec(minSlot.int64) res.expect("SQL query OK") +proc getHandlerImpl( + db: BeaconDb, contentKey: ContentKeyByteList, contentId: ContentId +): results.Opt[seq[byte]] = + let contentKey = contentKey.decode().valueOr: + # TODO: as this should not fail, maybe it is better to raiseAssert ? + return Opt.none(seq[byte]) + + case contentKey.contentType + of unused: + raiseAssert "Should not be used and fail at decoding" + of lightClientBootstrap: + db.getBootstrap(contentId) + of lightClientUpdate: + let + # TODO: add validation that startPeriod is not from the future, + # this requires db to be aware off the current beacon time + startPeriod = contentKey.lightClientUpdateKey.startPeriod + # get max 128 updates + numOfUpdates = min( + uint64(MAX_REQUEST_LIGHT_CLIENT_UPDATES), contentKey.lightClientUpdateKey.count + ) + toPeriod = startPeriod + numOfUpdates # Not inclusive + updates = db.getLightClientUpdates(startPeriod, toPeriod) + + if len(updates) == 0: + Opt.none(seq[byte]) + else: + # Note that this might not return all of the requested updates. + # This might seem faulty/tricky as it is also used in handleOffer to + # check if an offer should be accepted. + # But it is actually fine as this will occur only when the node is + # synced and it would not be able to verify the older updates in the + # range anyhow. + Opt.some(SSZ.encode(updates)) + of lightClientFinalityUpdate: + # TODO: + # Return only when the update is better than what is requested by + # contentKey. This is currently not possible as the contentKey does not + # include best update information. + if db.finalityUpdateCache.isSome(): + let slot = contentKey.lightClientFinalityUpdateKey.finalizedSlot + let cache = db.finalityUpdateCache.get() + if cache.lastFinalityUpdateSlot >= slot: + Opt.some(cache.lastFinalityUpdate) + else: + Opt.none(seq[byte]) + else: + Opt.none(seq[byte]) + of lightClientOptimisticUpdate: + # TODO same as above applies here too. + if db.optimisticUpdateCache.isSome(): + let slot = contentKey.lightClientOptimisticUpdateKey.optimisticSlot + let cache = db.optimisticUpdateCache.get() + if cache.lastOptimisticUpdateSlot >= slot: + Opt.some(cache.lastOptimisticUpdate) + else: + Opt.none(seq[byte]) + else: + Opt.none(seq[byte]) + of beacon_content.ContentType.historicalSummaries: + db.get(contentId) + proc createGetHandler*(db: BeaconDb): DbGetHandler = return ( proc(contentKey: ContentKeyByteList, contentId: ContentId): results.Opt[seq[byte]] = - let contentKey = contentKey.decode().valueOr: - # TODO: as this should not fail, maybe it is better to raiseAssert ? - return Opt.none(seq[byte]) - - case contentKey.contentType - of unused: - raiseAssert "Should not be used and fail at decoding" - of lightClientBootstrap: - db.getBootstrap(contentId) - of lightClientUpdate: - let - # TODO: add validation that startPeriod is not from the future, - # this requires db to be aware off the current beacon time - startPeriod = contentKey.lightClientUpdateKey.startPeriod - # get max 128 updates - numOfUpdates = min( - uint64(MAX_REQUEST_LIGHT_CLIENT_UPDATES), - contentKey.lightClientUpdateKey.count, - ) - toPeriod = startPeriod + numOfUpdates # Not inclusive - updates = db.getLightClientUpdates(startPeriod, toPeriod) - - if len(updates) == 0: - Opt.none(seq[byte]) - else: - # Note that this might not return all of the requested updates. - # This might seem faulty/tricky as it is also used in handleOffer to - # check if an offer should be accepted. - # But it is actually fine as this will occur only when the node is - # synced and it would not be able to verify the older updates in the - # range anyhow. - Opt.some(SSZ.encode(updates)) - of lightClientFinalityUpdate: - # TODO: - # Return only when the update is better than what is requested by - # contentKey. This is currently not possible as the contentKey does not - # include best update information. - if db.finalityUpdateCache.isSome(): - let slot = contentKey.lightClientFinalityUpdateKey.finalizedSlot - let cache = db.finalityUpdateCache.get() - if cache.lastFinalityUpdateSlot >= slot: - Opt.some(cache.lastFinalityUpdate) - else: - Opt.none(seq[byte]) - else: - Opt.none(seq[byte]) - of lightClientOptimisticUpdate: - # TODO same as above applies here too. - if db.optimisticUpdateCache.isSome(): - let slot = contentKey.lightClientOptimisticUpdateKey.optimisticSlot - let cache = db.optimisticUpdateCache.get() - if cache.lastOptimisticUpdateSlot >= slot: - Opt.some(cache.lastOptimisticUpdate) - else: - Opt.none(seq[byte]) - else: - Opt.none(seq[byte]) - of beacon_content.ContentType.historicalSummaries: - db.get(contentId) + db.getHandlerImpl(contentKey, contentId) ) proc createStoreHandler*(db: BeaconDb): DbStoreHandler = @@ -573,6 +577,12 @@ proc createStoreHandler*(db: BeaconDb): DbStoreHandler = db.put(contentId, content) ) +proc createContainsHandler*(db: BeaconDb): DbContainsHandler = + return ( + proc(contentKey: ContentKeyByteList, contentId: ContentId): bool = + db.getHandlerImpl(contentKey, contentId).isSome() + ) + proc createRadiusHandler*(db: BeaconDb): DbRadiusHandler = return ( proc(): UInt256 {.raises: [], gcsafe.} = diff --git a/fluffy/network/beacon/beacon_init_loader.nim b/fluffy/network/beacon/beacon_init_loader.nim index 30c426fe9f..e5f03ed058 100644 --- a/fluffy/network/beacon/beacon_init_loader.nim +++ b/fluffy/network/beacon/beacon_init_loader.nim @@ -14,6 +14,8 @@ import beacon_chain/beacon_clock, beacon_chain/conf +export beacon_clock, network_metadata + type NetworkInitData* = object clock*: BeaconClock metadata*: Eth2NetworkMetadata diff --git a/fluffy/network/beacon/beacon_light_client.nim b/fluffy/network/beacon/beacon_light_client.nim index d3e5df550e..cd4fcee7bd 100644 --- a/fluffy/network/beacon/beacon_light_client.nim +++ b/fluffy/network/beacon/beacon_light_client.nim @@ -9,6 +9,7 @@ import chronicles, + chronos, eth/p2p/discoveryv5/random2, beacon_chain/gossip_processing/light_client_processor, beacon_chain/beacon_clock, diff --git a/fluffy/network/beacon/beacon_network.nim b/fluffy/network/beacon/beacon_network.nim index 5f399ec94f..9ee4de945a 100644 --- a/fluffy/network/beacon/beacon_network.nim +++ b/fluffy/network/beacon/beacon_network.nim @@ -28,9 +28,13 @@ type BeaconNetwork* = ref object processor*: ref LightClientProcessor contentQueue*: AsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])] forkDigests*: ForkDigests + getBeaconTime: GetBeaconTimeFn + cfg*: RuntimeConfig trustedBlockRoot*: Opt[Eth2Digest] processContentLoop: Future[void] statusLogLoop: Future[void] + onEpochLoop: Future[void] + onPeriodLoop: Future[void] func toContentIdHandler(contentKey: ContentKeyByteList): results.Opt[ContentId] = ok(toContentId(contentKey)) @@ -187,6 +191,8 @@ proc new*( beaconDb: BeaconDb, streamManager: StreamManager, forkDigests: ForkDigests, + getBeaconTime: GetBeaconTimeFn, + cfg: RuntimeConfig, trustedBlockRoot: Opt[Eth2Digest], bootstrapRecords: openArray[Record] = [], portalConfig: PortalProtocolConfig = defaultPortalProtocolConfig, @@ -202,6 +208,7 @@ proc new*( toContentIdHandler, createGetHandler(beaconDb), createStoreHandler(beaconDb), + createContainsHandler(beaconDb), createRadiusHandler(beaconDb), stream, bootstrapRecords, @@ -220,6 +227,8 @@ proc new*( beaconDb: beaconDb, contentQueue: contentQueue, forkDigests: forkDigests, + getBeaconTime: getBeaconTime, + cfg: cfg, trustedBlockRoot: beaconBlockRoot, ) @@ -331,7 +340,10 @@ proc validateContent( n.validateHistoricalSummaries(summariesWithProof) proc validateContent( - n: BeaconNetwork, contentKeys: ContentKeysList, contentItems: seq[seq[byte]] + n: BeaconNetwork, + srcNodeId: Opt[NodeId], + contentKeys: ContentKeysList, + contentItems: seq[seq[byte]], ): Future[bool] {.async: (raises: [CancelledError]).} = # content passed here can have less items then contentKeys, but not more. for i, contentItem in contentItems: @@ -341,20 +353,79 @@ proc validateContent( if validation.isOk(): let contentIdOpt = n.portalProtocol.toContentId(contentKey) if contentIdOpt.isNone(): - error "Received offered content with invalid content key", contentKey + error "Received offered content with invalid content key", srcNodeId, contentKey return false let contentId = contentIdOpt.get() n.portalProtocol.storeContent(contentKey, contentId, contentItem) - info "Received offered content validated successfully", contentKey + debug "Received offered content validated successfully", srcNodeId, contentKey else: - error "Received offered content failed validation", - contentKey, error = validation.error + debug "Received offered content failed validation", + srcNodeId, contentKey, error = validation.error return false return true +proc sleepAsync( + t: TimeDiff +): Future[void] {.async: (raises: [CancelledError], raw: true).} = + sleepAsync(nanoseconds(if t.nanoseconds < 0: 0'i64 else: t.nanoseconds)) + +proc onEpoch(n: BeaconNetwork, wallTime: BeaconTime, wallEpoch: Epoch) = + debug "Epoch transition", epoch = shortLog(wallEpoch) + + n.beaconDb.keepBootstrapsFrom( + Slot((wallEpoch - n.cfg.MIN_EPOCHS_FOR_BLOCK_REQUESTS) * SLOTS_PER_EPOCH) + ) + +proc onPeriod(n: BeaconNetwork, wallTime: BeaconTime, wallPeriod: SyncCommitteePeriod) = + debug "Period transition", period = shortLog(wallPeriod) + + n.beaconDb.keepUpdatesFrom(wallPeriod - n.cfg.defaultLightClientDataMaxPeriods()) + +proc onEpochLoop(n: BeaconNetwork) {.async: (raises: []).} = + try: + var + currentEpoch = n.getBeaconTime().slotOrZero().epoch() + nextEpoch = currentEpoch + 1 + timeToNextEpoch = nextEpoch.start_slot().start_beacon_time() - n.getBeaconTime() + while true: + await sleepAsync(timeToNextEpoch) + + let + wallTime = n.getBeaconTime() + wallEpoch = wallTime.slotOrZero().epoch() + + n.onEpoch(wallTime, wallEpoch) + + currentEpoch = wallEpoch + nextEpoch = currentEpoch + 1 + timeToNextEpoch = nextEpoch.start_slot().start_beacon_time() - n.getBeaconTime() + except CancelledError: + trace "onEpochLoop canceled" + +proc onPeriodLoop(n: BeaconNetwork) {.async: (raises: []).} = + try: + var + currentPeriod = n.getBeaconTime().slotOrZero().sync_committee_period() + nextPeriod = currentPeriod + 1 + timeToNextPeriod = nextPeriod.start_slot().start_beacon_time() - n.getBeaconTime() + while true: + await sleepAsync(timeToNextPeriod) + + let + wallTime = n.getBeaconTime() + wallPeriod = wallTime.slotOrZero().sync_committee_period() + + n.onPeriod(wallTime, wallPeriod) + + currentPeriod = wallPeriod + nextPeriod = currentPeriod + 1 + timeToNextPeriod = nextPeriod.start_slot().start_beacon_time() - n.getBeaconTime() + except CancelledError: + trace "onPeriodLoop canceled" + proc processContentLoop(n: BeaconNetwork) {.async: (raises: []).} = try: while true: @@ -364,7 +435,7 @@ proc processContentLoop(n: BeaconNetwork) {.async: (raises: []).} = # dropped and not gossiped around. # TODO: Differentiate between failures due to invalid data and failures # due to missing network data for validation. - if await n.validateContent(contentKeys, contentItems): + if await n.validateContent(srcNodeId, contentKeys, contentItems): asyncSpawn n.portalProtocol.randomGossipDiscardPeers( srcNodeId, contentKeys, contentItems ) @@ -387,6 +458,8 @@ proc start*(n: BeaconNetwork) = n.portalProtocol.start() n.processContentLoop = processContentLoop(n) n.statusLogLoop = statusLogLoop(n) + n.onEpochLoop = onEpochLoop(n) + n.onPeriodLoop = onPeriodLoop(n) proc stop*(n: BeaconNetwork) {.async: (raises: []).} = info "Stopping Portal beacon chain network" @@ -400,6 +473,12 @@ proc stop*(n: BeaconNetwork) {.async: (raises: []).} = if not n.statusLogLoop.isNil(): futures.add(n.statusLogLoop.cancelAndWait()) + if not n.onEpochLoop.isNil(): + futures.add(n.onEpochLoop.cancelAndWait()) + + if not n.onPeriodLoop.isNil(): + futures.add(n.onPeriodLoop.cancelAndWait()) + await noCancel(allFutures(futures)) n.beaconDb.close() diff --git a/fluffy/network/history/history_network.nim b/fluffy/network/history/history_network.nim index 76d8ca6e5e..b975e65d58 100644 --- a/fluffy/network/history/history_network.nim +++ b/fluffy/network/history/history_network.nim @@ -18,7 +18,7 @@ import ../../database/content_db, ../../network_metadata, ../wire/[portal_protocol, portal_stream, portal_protocol_config], - "."/[history_content, validation/historical_hashes_accumulator], + "."/[history_content, history_validation, history_type_conversions], ../beacon/beacon_chain_historical_roots, ./content/content_deprecated @@ -28,7 +28,7 @@ from eth/common/accounts import EMPTY_ROOT_HASH logScope: topics = "portal_hist" -export historical_hashes_accumulator, blocks_rlp +export blocks_rlp type HistoryNetwork* = ref object @@ -46,303 +46,25 @@ type func toContentIdHandler(contentKey: ContentKeyByteList): results.Opt[ContentId] = ok(toContentId(contentKey)) -## Calls to go from SSZ decoded Portal types to RLP fully decoded EL types +## Get local content calls -func fromPortalBlockBody*( - T: type BlockBody, body: PortalBlockBodyLegacy -): Result[T, string] = - ## Get the EL BlockBody from the SSZ-decoded `PortalBlockBodyLegacy`. - try: - var transactions: seq[Transaction] - for tx in body.transactions: - transactions.add(rlp.decode(tx.asSeq(), Transaction)) - - let uncles = rlp.decode(body.uncles.asSeq(), seq[Header]) - - ok(BlockBody(transactions: transactions, uncles: uncles)) - except RlpError as e: - err("RLP decoding failed: " & e.msg) - -func fromPortalBlockBody*( - T: type BlockBody, body: PortalBlockBodyShanghai -): Result[T, string] = - ## Get the EL BlockBody from the SSZ-decoded `PortalBlockBodyShanghai`. - try: - var transactions: seq[Transaction] - for tx in body.transactions: - transactions.add(rlp.decode(tx.asSeq(), Transaction)) - - var withdrawals: seq[Withdrawal] - for w in body.withdrawals: - withdrawals.add(rlp.decode(w.asSeq(), Withdrawal)) - - ok( - BlockBody( - transactions: transactions, - uncles: @[], # Uncles must be empty, this is verified in `validateBlockBody` - withdrawals: Opt.some(withdrawals), - ) - ) - except RlpError as e: - err("RLP decoding failed: " & e.msg) - -func fromPortalBlockBodyOrRaise*( - T: type BlockBody, body: PortalBlockBodyLegacy | PortalBlockBodyShanghai -): T = - ## Get the EL BlockBody from one of the SSZ-decoded Portal BlockBody types. - ## Will raise Assertion in case of invalid RLP encodings. Only use of data - ## has been validated before! - let res = BlockBody.fromPortalBlockBody(body) - if res.isOk(): - res.get() - else: - raiseAssert(res.error) - -func fromPortalReceipts*( - T: type seq[Receipt], receipts: PortalReceipts -): Result[T, string] = - ## Get the full decoded EL seq[Receipt] from the SSZ-decoded `PortalReceipts`. - try: - var res: seq[Receipt] - for receipt in receipts: - res.add(rlp.decode(receipt.asSeq(), Receipt)) - - ok(res) - except RlpError as e: - err("RLP decoding failed: " & e.msg) - -## Calls to encode EL block types to the SSZ encoded Portal types. - -# TODO: The fact that we have different Portal BlockBody types for the different -# forks but not for the EL BlockBody (usage of Option) does not play so well -# together. - -func fromBlockBody*(T: type PortalBlockBodyLegacy, body: BlockBody): T = - var transactions: Transactions - for tx in body.transactions: - discard transactions.add(TransactionByteList(rlp.encode(tx))) - - let uncles = Uncles(rlp.encode(body.uncles)) - - PortalBlockBodyLegacy(transactions: transactions, uncles: uncles) - -func fromBlockBody*(T: type PortalBlockBodyShanghai, body: BlockBody): T = - var transactions: Transactions - for tx in body.transactions: - discard transactions.add(TransactionByteList(rlp.encode(tx))) - - let uncles = Uncles(rlp.encode(body.uncles)) - - doAssert(body.withdrawals.isSome()) - - var withdrawals: Withdrawals - for w in body.withdrawals.get(): - discard withdrawals.add(WithdrawalByteList(rlp.encode(w))) - PortalBlockBodyShanghai( - transactions: transactions, uncles: uncles, withdrawals: withdrawals - ) - -func fromReceipts*(T: type PortalReceipts, receipts: seq[Receipt]): T = - var portalReceipts: PortalReceipts - for receipt in receipts: - discard portalReceipts.add(ReceiptByteList(rlp.encode(receipt))) - - portalReceipts - -func encode*(blockBody: BlockBody): seq[byte] = - if blockBody.withdrawals.isSome(): - SSZ.encode(PortalBlockBodyShanghai.fromBlockBody(blockBody)) - else: - SSZ.encode(PortalBlockBodyLegacy.fromBlockBody(blockBody)) - -func encode*(receipts: seq[Receipt]): seq[byte] = - let portalReceipts = PortalReceipts.fromReceipts(receipts) - - SSZ.encode(portalReceipts) - -## Calls and helper calls to do validation of block header, body and receipts -# TODO: Failures on validation and perhaps deserialisation should be punished -# for if/when peer scoring/banning is added. - -func validateBlockHeader*(header: Header, blockHash: Hash32): Result[void, string] = - if not (header.rlpHash() == blockHash): - err("Block header hash does not match") - else: - ok() - -func validateBlockHeader*(header: Header, number: uint64): Result[void, string] = - if not (header.number == number): - err("Block header number does not match") - else: - ok() - -func validateBlockHeaderBytes*( - bytes: openArray[byte], id: uint64 | Hash32 -): Result[Header, string] = - let header = ?decodeRlp(bytes, Header) - - # Note: - # One could do additional quick-checks here such as timestamp vs the optional - # (later forks) added fields. E.g. Shanghai field, Cancun fields, - # zero ommersHash, etc. - # However, the block hash comparison will obviously catch these and it is - # pretty trivial to provide a non-canonical valid header. - # It might be somewhat more useful if just done (temporarily) for the headers - # post-merge which are currently provided without proof. - # For comparison by number this is obviously not sufficient as any other field - # could be manipulated and because of this a block header proof will always - # be needed. - - ?header.validateBlockHeader(id) - - ok(header) - -template append*(w: var RlpWriter, v: TransactionByteList) = - w.appendRawBytes(v.asSeq) - -template append*(w: var RlpWriter, v: WithdrawalByteList) = - w.appendRawBytes(v.asSeq) - -template append*(w: var RlpWriter, v: ReceiptByteList) = - w.appendRawBytes(v.asSeq) - -proc validateBlockBody*( - body: PortalBlockBodyLegacy, header: Header -): Result[void, string] = - ## Validate the block body against the txRoot and ommersHash from the header. - let calculatedOmmersHash = keccak256(body.uncles.asSeq()) - if calculatedOmmersHash != header.ommersHash: - return err("Invalid ommers hash") - - let calculatedTxsRoot = orderedTrieRoot(body.transactions.asSeq) - if calculatedTxsRoot != header.txRoot: - return err( - "Invalid transactions root: expected " & $header.txRoot & " - got " & - $calculatedTxsRoot - ) - - ok() - -proc validateBlockBody*( - body: PortalBlockBodyShanghai, header: Header -): Result[void, string] = - ## Validate the block body against the txRoot, ommersHash and withdrawalsRoot - ## from the header. - # Shortcut the ommersHash calculation as uncles must be an RLP encoded - # empty list - if body.uncles.asSeq() != @[byte 0xc0]: - return err("Invalid ommers hash, uncles list is not empty") - - let calculatedTxsRoot = orderedTrieRoot(body.transactions.asSeq) - if calculatedTxsRoot != header.txRoot: - return err( - "Invalid transactions root: expected " & $header.txRoot & " - got " & - $calculatedTxsRoot - ) - - # TODO: This check is done higher up but perhaps this can become cleaner with - # some refactor. - doAssert(header.withdrawalsRoot.isSome()) - - let - calculatedWithdrawalsRoot = orderedTrieRoot(body.withdrawals.asSeq) - headerWithdrawalsRoot = header.withdrawalsRoot.get() - if calculatedWithdrawalsRoot != headerWithdrawalsRoot: - return err( - "Invalid withdrawals root: expected " & $headerWithdrawalsRoot & " - got " & - $calculatedWithdrawalsRoot - ) - - ok() - -proc decodeBlockBodyBytes*(bytes: openArray[byte]): Result[BlockBody, string] = - if (let body = decodeSsz(bytes, PortalBlockBodyShanghai); body.isOk()): - BlockBody.fromPortalBlockBody(body.get()) - elif (let body = decodeSsz(bytes, PortalBlockBodyLegacy); body.isOk()): - BlockBody.fromPortalBlockBody(body.get()) - else: - err("All Portal block body decodings failed") - -proc validateBlockBodyBytes*( - bytes: openArray[byte], header: Header -): Result[BlockBody, string] = - ## Fully decode the SSZ encoded Portal Block Body and validate it against the - ## header. - ## TODO: improve this decoding in combination with the block body validation - ## calls. - let timestamp = Moment.init(header.timestamp.int64, Second) - # TODO: The additional header checks are not needed as header is implicitly - # verified by means of the accumulator? Except that we don't use this yet - # post merge, so the checks are still useful, for now. - if isShanghai(chainConfig, timestamp): - if header.withdrawalsRoot.isNone(): - err("Expected withdrawalsRoot for Shanghai block") - elif header.ommersHash != EMPTY_UNCLE_HASH: - err("Expected empty uncles for a Shanghai block") - else: - let body = ?decodeSsz(bytes, PortalBlockBodyShanghai) - ?validateBlockBody(body, header) - BlockBody.fromPortalBlockBody(body) - elif isPoSBlock(chainConfig, header.number): - if header.withdrawalsRoot.isSome(): - err("Expected no withdrawalsRoot for pre Shanghai block") - elif header.ommersHash != EMPTY_UNCLE_HASH: - err("Expected empty uncles for a PoS block") - else: - let body = ?decodeSsz(bytes, PortalBlockBodyLegacy) - ?validateBlockBody(body, header) - BlockBody.fromPortalBlockBody(body) - else: - if header.withdrawalsRoot.isSome(): - err("Expected no withdrawalsRoot for pre Shanghai block") - else: - let body = ?decodeSsz(bytes, PortalBlockBodyLegacy) - ?validateBlockBody(body, header) - BlockBody.fromPortalBlockBody(body) - -proc validateReceipts*( - receipts: PortalReceipts, receiptsRoot: Hash32 -): Result[void, string] = - if orderedTrieRoot(receipts.asSeq) != receiptsRoot: - err("Unexpected receipt root") - else: - ok() - -proc validateReceiptsBytes*( - bytes: openArray[byte], receiptsRoot: Hash32 -): Result[seq[Receipt], string] = - ## Fully decode the SSZ encoded receipts and validate it against the header's - ## receipts root. - let receipts = ?decodeSsz(bytes, PortalReceipts) - - ?validateReceipts(receipts, receiptsRoot) - - seq[Receipt].fromPortalReceipts(receipts) - -## Content helper calls for specific history network types - -proc getContent( +proc getLocalContent( n: HistoryNetwork, T: type Header, contentKey: ContentKeyByteList, contentId: ContentId, ): Opt[T] = - let localContent = n.portalProtocol.getLocalContent(contentKey, contentId).valueOr: - return Opt.none(T) + let + localContent = n.portalProtocol.getLocalContent(contentKey, contentId).valueOr: + return Opt.none(T) - let headerWithProof = - try: - SSZ.decode(localContent, BlockHeaderWithProof) - except SerializationError as e: - raiseAssert(e.msg) + # Stored data should always be serialized correctly + headerWithProof = decodeSszOrRaise(localContent, BlockHeaderWithProof) + header = decodeRlpOrRaise(headerWithProof.header.asSeq(), T) - let res = decodeRlp(headerWithProof.header.asSeq(), T) - if res.isErr(): - raiseAssert(res.error) - else: - Opt.some(res.get()) + Opt.some(header) -proc getContent( +proc getLocalContent( n: HistoryNetwork, T: type BlockBody, contentKey: ContentKeyByteList, @@ -370,40 +92,22 @@ proc getContent( Opt.some(body) -proc getContent( +proc getLocalContent( n: HistoryNetwork, T: type seq[Receipt], contentKey: ContentKeyByteList, contentId: ContentId, ): Opt[T] = - let localContent = n.portalProtocol.getLocalContent(contentKey, contentId).valueOr: - return Opt.none(T) + let + localContent = n.portalProtocol.getLocalContent(contentKey, contentId).valueOr: + return Opt.none(T) - let portalReceipts = - try: - SSZ.decode(localContent, PortalReceipts) - except SerializationError: - raiseAssert("Stored data should always be serialized correctly") + # Stored data should always be serialized correctly + portalReceipts = decodeSszOrRaise(localContent, PortalReceipts) + receipts = T.fromPortalReceipts(portalReceipts).valueOr: + raiseAssert(error) - let res = T.fromPortalReceipts(portalReceipts) - if res.isErr(): - raiseAssert(res.error) - else: - Opt.some(res.get()) - -proc getContent( - n: HistoryNetwork, - T: type EpochRecord, - contentKey: ContentKeyByteList, - contentId: ContentId, -): Opt[T] = - let localContent = n.portalProtocol.getLocalContent(contentKey, contentId).valueOr: - return Opt.none(T) - - try: - Opt.some(SSZ.decode(localContent, T)) - except SerializationError: - raiseAssert("Stored data should always be serialized correctly") + Opt.some(receipts) ## Public API to get the history network specific types, either from database ## or through a lookup on the Portal Network @@ -416,11 +120,6 @@ proc getContent( # ongoing requests are cancelled after the receival of the first response, # however that response is not yet validated at that moment. -func verifyHeader( - n: HistoryNetwork, header: Header, proof: BlockHeaderProof -): Result[void, string] = - verifyHeader(n.accumulator, header, proof) - proc getVerifiedBlockHeader*( n: HistoryNetwork, id: Hash32 | uint64 ): Future[Opt[Header]] {.async: (raises: [CancelledError]).} = @@ -435,10 +134,10 @@ proc getVerifiedBlockHeader*( # Note: This still requests a BlockHeaderWithProof from the database, as that # is what is stored. But the proof doesn't need to be verified as it gets # gets verified before storing. - let headerFromDb = n.getContent(Header, contentKey, contentId) - if headerFromDb.isSome(): - info "Fetched block header from database" - return headerFromDb + let localContent = n.getLocalContent(Header, contentKey, contentId) + if localContent.isSome(): + debug "Fetched block header locally" + return localContent for i in 0 ..< (1 + n.contentRequestRetries): let @@ -446,19 +145,11 @@ proc getVerifiedBlockHeader*( warn "Failed fetching block header with proof from the network" return Opt.none(Header) - headerWithProof = decodeSsz(headerContent.content, BlockHeaderWithProof).valueOr: - warn "Failed decoding header with proof", error = error - continue - - header = validateBlockHeaderBytes(headerWithProof.header.asSeq(), id).valueOr: + header = validateCanonicalHeaderBytes(headerContent.content, id, n.accumulator).valueOr: warn "Validation of block header failed", error = error continue - if (let r = n.verifyHeader(header, headerWithProof.proof); r.isErr): - warn "Verification of block header failed", error = r.error - continue - - info "Fetched valid block header from the network" + debug "Fetched valid block header from the network" # Content is valid, it can be stored and propagated to interested peers n.portalProtocol.storeContent( contentKey, contentId, headerContent.content, cacheContent = true @@ -470,7 +161,7 @@ proc getVerifiedBlockHeader*( return Opt.some(header) # Headers were requested `1 + requestRetries` times and all failed on validation - return Opt.none(Header) + Opt.none(Header) proc getBlockBody*( n: HistoryNetwork, blockHash: Hash32, header: Header @@ -487,10 +178,10 @@ proc getBlockBody*( blockHash contentKey - let bodyFromDb = n.getContent(BlockBody, contentKey, contentId, header) - if bodyFromDb.isSome(): - info "Fetched block body from database" - return bodyFromDb + let localContent = n.getLocalContent(BlockBody, contentKey, contentId, header) + if localContent.isSome(): + debug "Fetched block body locally" + return localContent for i in 0 ..< (1 + n.contentRequestRetries): let @@ -502,7 +193,7 @@ proc getBlockBody*( warn "Validation of block body failed", error continue - info "Fetched block body from the network" + debug "Fetched block body from the network" # Content is valid, it can be stored and propagated to interested peers n.portalProtocol.storeContent( contentKey, contentId, bodyContent.content, cacheContent = true @@ -514,7 +205,7 @@ proc getBlockBody*( return Opt.some(body) # Bodies were requested `1 + requestRetries` times and all failed on validation - return Opt.none(BlockBody) + Opt.none(BlockBody) proc getBlock*( n: HistoryNetwork, id: Hash32 | uint64 @@ -537,7 +228,7 @@ proc getBlock*( warn "Failed to get body when getting block", hash return Opt.none(Block) - return Opt.some((header, body)) + Opt.some((header, body)) proc getBlockHashByNumber*( n: HistoryNetwork, blockNumber: uint64 @@ -562,10 +253,10 @@ proc getReceipts*( blockHash contentKey - let receiptsFromDb = n.getContent(seq[Receipt], contentKey, contentId) - if receiptsFromDb.isSome(): - info "Fetched receipts from database" - return receiptsFromDb + let localContent = n.getLocalContent(seq[Receipt], contentKey, contentId) + if localContent.isSome(): + debug "Fetched receipts locally" + return localContent for i in 0 ..< (1 + n.contentRequestRetries): let @@ -576,7 +267,7 @@ proc getReceipts*( warn "Validation of receipts failed", error continue - info "Fetched receipts from the network" + debug "Fetched receipts from the network" # Content is valid, it can be stored and propagated to interested peers n.portalProtocol.storeContent( contentKey, contentId, receiptsContent.content, cacheContent = true @@ -588,71 +279,45 @@ proc getReceipts*( return Opt.some(receipts) # Receipts were requested `1 + requestRetries` times and all failed on validation - return Opt.none(seq[Receipt]) + Opt.none(seq[Receipt]) proc validateContent( - n: HistoryNetwork, content: seq[byte], contentKey: ContentKeyByteList -): Future[bool] {.async: (raises: [CancelledError]).} = - let key = contentKey.decode().valueOr: - return false + n: HistoryNetwork, content: seq[byte], contentKeyBytes: ContentKeyByteList +): Future[Result[void, string]] {.async: (raises: [CancelledError]).} = + let contentKey = contentKeyBytes.decode().valueOr: + return err("Error decoding content key") - case key.contentType + case contentKey.contentType of blockHeader: - let - headerWithProof = decodeSsz(content, BlockHeaderWithProof).valueOr: - warn "Failed decoding header with proof", error - return false - header = validateBlockHeaderBytes( - headerWithProof.header.asSeq(), key.blockHeaderKey.blockHash - ).valueOr: - warn "Invalid block header offered", error - return false + let _ = validateCanonicalHeaderBytes( + content, contentKey.blockHeaderKey.blockHash, n.accumulator + ).valueOr: + return err("Failed validating block header: " & error) - let res = n.verifyHeader(header, headerWithProof.proof) - if res.isErr(): - warn "Failed on check if header is part of canonical chain", error = res.error - return false - else: - return true + ok() of blockBody: - let header = (await n.getVerifiedBlockHeader(key.blockBodyKey.blockHash)).valueOr: - warn "Failed getting canonical header for block" - return false + let + header = (await n.getVerifiedBlockHeader(contentKey.blockBodyKey.blockHash)).valueOr: + return err("Failed getting canonical header for block") + _ = validateBlockBodyBytes(content, header).valueOr: + return err("Failed validating block body: " & error) - let res = validateBlockBodyBytes(content, header) - if res.isErr(): - warn "Failed validating block body", error = res.error - return false - else: - return true + ok() of receipts: - let header = (await n.getVerifiedBlockHeader(key.receiptsKey.blockHash)).valueOr: - warn "Failed getting canonical header for receipts" - return false + let + header = (await n.getVerifiedBlockHeader(contentKey.receiptsKey.blockHash)).valueOr: + return err("Failed getting canonical header for receipts") + _ = validateReceiptsBytes(content, header.receiptsRoot).valueOr: + return err("Failed validating receipts: " & error) - let res = validateReceiptsBytes(content, header.receiptsRoot) - if res.isErr(): - warn "Failed validating receipts", error = res.error - return false - else: - return true + ok() of blockNumber: - let - headerWithProof = decodeSsz(content, BlockHeaderWithProof).valueOr: - warn "Failed decoding header with proof", error - return false - header = validateBlockHeaderBytes( - headerWithProof.header.asSeq(), key.blockNumberKey.blockNumber - ).valueOr: - warn "Invalid block header offered", error - return false + let _ = validateCanonicalHeaderBytes( + content, contentKey.blockNumberKey.blockNumber, n.accumulator + ).valueOr: + return err("Failed validating block header: " & error) - let res = n.verifyHeader(header, headerWithProof.proof) - if res.isErr(): - warn "Failed on check if header is part of canonical chain", error = res.error - return false - else: - return true + ok() proc new*( T: type HistoryNetwork, @@ -677,6 +342,7 @@ proc new*( toContentIdHandler, createGetHandler(contentDB), createStoreHandler(contentDB, portalConfig.radiusConfig), + createContainsHandler(contentDB), createRadiusHandler(contentDB), stream, bootstrapRecords, @@ -693,21 +359,26 @@ proc new*( ) proc validateContent( - n: HistoryNetwork, contentKeys: ContentKeysList, contentItems: seq[seq[byte]] + n: HistoryNetwork, + srcNodeId: Opt[NodeId], + contentKeys: ContentKeysList, + contentItems: seq[seq[byte]], ): Future[bool] {.async: (raises: [CancelledError]).} = # content passed here can have less items then contentKeys, but not more. for i, contentItem in contentItems: let contentKey = contentKeys[i] - if await n.validateContent(contentItem, contentKey): + let res = await n.validateContent(contentItem, contentKey) + if res.isOk(): let contentId = n.portalProtocol.toContentId(contentKey).valueOr: - error "Received offered content with invalid content key", contentKey + warn "Received offered content with invalid content key", srcNodeId, contentKey return false n.portalProtocol.storeContent(contentKey, contentId, contentItem) - info "Received offered content validated successfully", contentKey + debug "Received offered content validated successfully", srcNodeId, contentKey else: - error "Received offered content failed validation", contentKey + debug "Received offered content failed validation", + srcNodeId, contentKey, error = res.error return false return true @@ -721,7 +392,7 @@ proc processContentLoop(n: HistoryNetwork) {.async: (raises: []).} = # dropped and not gossiped around. # TODO: Differentiate between failures due to invalid data and failures # due to missing network data for validation. - if await n.validateContent(contentKeys, contentItems): + if await n.validateContent(srcNodeId, contentKeys, contentItems): asyncSpawn n.portalProtocol.neighborhoodGossipDiscardPeers( srcNodeId, contentKeys, contentItems ) diff --git a/fluffy/network/history/history_type_conversions.nim b/fluffy/network/history/history_type_conversions.nim new file mode 100644 index 0000000000..0cd3c3276a --- /dev/null +++ b/fluffy/network/history/history_type_conversions.nim @@ -0,0 +1,145 @@ +# Fluffy +# Copyright (c) 2021-2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import + eth/common/[headers_rlp, blocks_rlp, receipts_rlp, transactions_rlp], + ./history_content + +export history_content, headers_rlp, blocks_rlp, receipts_rlp + +## Calls to go from SSZ decoded Portal types to RLP fully decoded EL types + +func fromPortalBlockBody*( + T: type BlockBody, body: PortalBlockBodyLegacy +): Result[T, string] = + ## Get the EL BlockBody from the SSZ-decoded `PortalBlockBodyLegacy`. + try: + var transactions: seq[Transaction] + for tx in body.transactions: + transactions.add(rlp.decode(tx.asSeq(), Transaction)) + + let uncles = rlp.decode(body.uncles.asSeq(), seq[Header]) + + ok(BlockBody(transactions: transactions, uncles: uncles)) + except RlpError as e: + err("RLP decoding failed: " & e.msg) + +func fromPortalBlockBody*( + T: type BlockBody, body: PortalBlockBodyShanghai +): Result[T, string] = + ## Get the EL BlockBody from the SSZ-decoded `PortalBlockBodyShanghai`. + try: + var transactions: seq[Transaction] + for tx in body.transactions: + transactions.add(rlp.decode(tx.asSeq(), Transaction)) + + var withdrawals: seq[Withdrawal] + for w in body.withdrawals: + withdrawals.add(rlp.decode(w.asSeq(), Withdrawal)) + + ok( + BlockBody( + transactions: transactions, + uncles: @[], # Uncles must be empty, this is verified in `validateBlockBody` + withdrawals: Opt.some(withdrawals), + ) + ) + except RlpError as e: + err("RLP decoding failed: " & e.msg) + +func fromPortalBlockBodyBytes*(bytes: openArray[byte]): Result[BlockBody, string] = + if (let res = decodeSsz(bytes, PortalBlockBodyLegacy); res.isOk()): + BlockBody.fromPortalBlockBody(res.value()) + elif (let res = decodeSsz(bytes, PortalBlockBodyShanghai); res.isOk()): + BlockBody.fromPortalBlockBody(res.value()) + else: + err("Invalid Portal BlockBody encoding") + +func fromPortalBlockBodyOrRaise*( + T: type BlockBody, body: PortalBlockBodyLegacy | PortalBlockBodyShanghai +): T = + ## Get the EL BlockBody from one of the SSZ-decoded Portal BlockBody types. + ## Will raise Assertion in case of invalid RLP encodings. Only use for data + ## has been validated before! + let res = BlockBody.fromPortalBlockBody(body) + if res.isOk(): + res.get() + else: + raiseAssert(res.error) + +func fromPortalReceipts*( + T: type seq[Receipt], receipts: PortalReceipts +): Result[T, string] = + ## Get the full decoded EL seq[Receipt] from the SSZ-decoded `PortalReceipts`. + try: + var res: seq[Receipt] + for receipt in receipts: + res.add(rlp.decode(receipt.asSeq(), Receipt)) + + ok(res) + except RlpError as e: + err("RLP decoding failed: " & e.msg) + +## Calls to convert EL block types to the Portal types. + +func fromBlockBody*(T: type PortalBlockBodyLegacy, body: BlockBody): T = + var transactions: Transactions + for tx in body.transactions: + discard transactions.add(TransactionByteList(rlp.encode(tx))) + + let uncles = Uncles(rlp.encode(body.uncles)) + + PortalBlockBodyLegacy(transactions: transactions, uncles: uncles) + +func fromBlockBody*(T: type PortalBlockBodyShanghai, body: BlockBody): T = + var transactions: Transactions + for tx in body.transactions: + discard transactions.add(TransactionByteList(rlp.encode(tx))) + + let uncles = Uncles(rlp.encode(body.uncles)) + + doAssert(body.withdrawals.isSome()) + + var withdrawals: Withdrawals + for w in body.withdrawals.get(): + discard withdrawals.add(WithdrawalByteList(rlp.encode(w))) + PortalBlockBodyShanghai( + transactions: transactions, uncles: uncles, withdrawals: withdrawals + ) + +func fromReceipts*(T: type PortalReceipts, receipts: seq[Receipt]): T = + var portalReceipts: PortalReceipts + for receipt in receipts: + discard portalReceipts.add(ReceiptByteList(rlp.encode(receipt))) + + portalReceipts + +## Calls to encode EL types to the SSZ encoded Portal types. + +func encode*(blockBody: BlockBody): seq[byte] = + if blockBody.withdrawals.isSome(): + SSZ.encode(PortalBlockBodyShanghai.fromBlockBody(blockBody)) + else: + SSZ.encode(PortalBlockBodyLegacy.fromBlockBody(blockBody)) + +func encode*(receipts: seq[Receipt]): seq[byte] = + let portalReceipts = PortalReceipts.fromReceipts(receipts) + + SSZ.encode(portalReceipts) + +## RLP writer append calls for the Portal/SSZ types + +template append*(w: var RlpWriter, v: TransactionByteList) = + w.appendRawBytes(v.asSeq) + +template append*(w: var RlpWriter, v: WithdrawalByteList) = + w.appendRawBytes(v.asSeq) + +template append*(w: var RlpWriter, v: ReceiptByteList) = + w.appendRawBytes(v.asSeq) diff --git a/fluffy/network/history/history_validation.nim b/fluffy/network/history/history_validation.nim new file mode 100644 index 0000000000..90f9406730 --- /dev/null +++ b/fluffy/network/history/history_validation.nim @@ -0,0 +1,177 @@ +# Fluffy +# Copyright (c) 2021-2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import + chronos/timer, + eth/trie/ordered_trie, + ../../network_metadata, + ./history_type_conversions, + ./validation/historical_hashes_accumulator + +from eth/common/eth_types_rlp import rlpHash + +export historical_hashes_accumulator + +func validateHeader(header: Header, blockHash: Hash32): Result[void, string] = + if not (header.rlpHash() == blockHash): + err("Header hash does not match") + else: + ok() + +func validateHeader(header: Header, number: uint64): Result[void, string] = + if not (header.number == number): + err("Header number does not match") + else: + ok() + +func validateHeaderBytes*( + bytes: openArray[byte], id: uint64 | Hash32 +): Result[Header, string] = + # Note: + # No additional quick-checks are addedhere such as timestamp vs the optional + # (later forks) added fields. E.g. Shanghai field, Cancun fields, + # zero ommersHash, etc. + # This is because the block hash comparison + canonical verification will + # catch these. For comparison by number this is will also be caught by the + # canonical verification. + let header = ?decodeRlp(bytes, Header) + + ?header.validateHeader(id) + + ok(header) + +func verifyBlockHeaderProof*( + a: FinishedHistoricalHashesAccumulator, header: Header, proof: BlockHeaderProof +): Result[void, string] = + case proof.proofType + of BlockHeaderProofType.historicalHashesAccumulatorProof: + a.verifyAccumulatorProof(header, proof.historicalHashesAccumulatorProof) + of BlockHeaderProofType.none: + if header.isPreMerge(): + err("Pre merge header requires HistoricalHashesAccumulatorProof") + else: + # TODO: + # Add verification post merge based on historical_roots & historical_summaries + ok() + +func validateCanonicalHeaderBytes*( + bytes: openArray[byte], id: uint64 | Hash32, a: FinishedHistoricalHashesAccumulator +): Result[Header, string] = + let headerWithProof = decodeSsz(bytes, BlockHeaderWithProof).valueOr: + return err("Failed decoding header with proof: " & error) + let header = ?validateHeaderBytes(headerWithProof.header.asSeq(), id) + + ?a.verifyBlockHeaderProof(header, headerWithProof.proof) + + ok(header) + +func validateBlockBody*( + body: PortalBlockBodyLegacy, header: Header +): Result[void, string] = + ## Validate the block body against the txRoot and ommersHash from the header. + let calculatedOmmersHash = keccak256(body.uncles.asSeq()) + if calculatedOmmersHash != header.ommersHash: + return err("Invalid ommers hash") + + let calculatedTxsRoot = orderedTrieRoot(body.transactions.asSeq) + if calculatedTxsRoot != header.txRoot: + return err( + "Invalid transactions root: expected " & $header.txRoot & " - got " & + $calculatedTxsRoot + ) + + ok() + +func validateBlockBody*( + body: PortalBlockBodyShanghai, header: Header +): Result[void, string] = + ## Validate the block body against the txRoot, ommersHash and withdrawalsRoot + ## from the header. + # Shortcut the ommersHash calculation as uncles must be an RLP encoded + # empty list + if body.uncles.asSeq() != @[byte 0xc0]: + return err("Invalid ommers hash, uncles list is not empty") + + let calculatedTxsRoot = orderedTrieRoot(body.transactions.asSeq) + if calculatedTxsRoot != header.txRoot: + return err( + "Invalid transactions root: expected " & $header.txRoot & " - got " & + $calculatedTxsRoot + ) + + # TODO: This check is done higher up but perhaps this can become cleaner with + # some refactor. + doAssert(header.withdrawalsRoot.isSome()) + + let + calculatedWithdrawalsRoot = orderedTrieRoot(body.withdrawals.asSeq) + headerWithdrawalsRoot = header.withdrawalsRoot.get() + if calculatedWithdrawalsRoot != headerWithdrawalsRoot: + return err( + "Invalid withdrawals root: expected " & $headerWithdrawalsRoot & " - got " & + $calculatedWithdrawalsRoot + ) + + ok() + +func validateBlockBodyBytes*( + bytes: openArray[byte], header: Header +): Result[BlockBody, string] = + ## Fully decode the SSZ encoded Portal Block Body and validate it against the + ## header. + ## TODO: improve this decoding in combination with the block body validation + ## calls. + let timestamp = Moment.init(header.timestamp.int64, Second) + # TODO: The additional header checks are not needed as header is implicitly + # verified by means of the accumulator? Except that we don't use this yet + # post merge, so the checks are still useful, for now. + if isShanghai(chainConfig, timestamp): + if header.withdrawalsRoot.isNone(): + err("Expected withdrawalsRoot for Shanghai block") + elif header.ommersHash != EMPTY_UNCLE_HASH: + err("Expected empty uncles for a Shanghai block") + else: + let body = ?decodeSsz(bytes, PortalBlockBodyShanghai) + ?validateBlockBody(body, header) + BlockBody.fromPortalBlockBody(body) + elif isPoSBlock(chainConfig, header.number): + if header.withdrawalsRoot.isSome(): + err("Expected no withdrawalsRoot for pre Shanghai block") + elif header.ommersHash != EMPTY_UNCLE_HASH: + err("Expected empty uncles for a PoS block") + else: + let body = ?decodeSsz(bytes, PortalBlockBodyLegacy) + ?validateBlockBody(body, header) + BlockBody.fromPortalBlockBody(body) + else: + if header.withdrawalsRoot.isSome(): + err("Expected no withdrawalsRoot for pre Shanghai block") + else: + let body = ?decodeSsz(bytes, PortalBlockBodyLegacy) + ?validateBlockBody(body, header) + BlockBody.fromPortalBlockBody(body) + +func validateReceipts*( + receipts: PortalReceipts, receiptsRoot: Hash32 +): Result[void, string] = + if orderedTrieRoot(receipts.asSeq) != receiptsRoot: + err("Unexpected receipt root") + else: + ok() + +func validateReceiptsBytes*( + bytes: openArray[byte], receiptsRoot: Hash32 +): Result[seq[Receipt], string] = + ## Fully decode the SSZ encoded receipts and validate it against the header's + ## receipts root. + let receipts = ?decodeSsz(bytes, PortalReceipts) + + ?validateReceipts(receipts, receiptsRoot) + + seq[Receipt].fromPortalReceipts(receipts) diff --git a/fluffy/network/history/validation/historical_hashes_accumulator.nim b/fluffy/network/history/validation/historical_hashes_accumulator.nim index f46d64d3dd..a6979aeced 100644 --- a/fluffy/network/history/validation/historical_hashes_accumulator.nim +++ b/fluffy/network/history/validation/historical_hashes_accumulator.nim @@ -175,24 +175,6 @@ func verifyAccumulatorProof*( else: err("Cannot verify post merge header with accumulator proof") -func verifyHeader*( - a: FinishedHistoricalHashesAccumulator, header: Header, proof: BlockHeaderProof -): Result[void, string] = - case proof.proofType - of BlockHeaderProofType.historicalHashesAccumulatorProof: - a.verifyAccumulatorProof(header, proof.historicalHashesAccumulatorProof) - of BlockHeaderProofType.none: - if header.isPreMerge(): - err("Pre merge header requires HistoricalHashesAccumulatorProof") - else: - # TODO: - # Currently there is no proof solution for verifying headers post-merge. - # Skipping canonical verification will allow for nodes to push block data - # that is not part of the canonical chain. - # For now we accept this flaw as the focus lies on testing data - # availability up to the head of the chain. - ok() - func buildProof*( header: Header, epochRecord: EpochRecord | EpochRecordCached ): Result[HistoricalHashesAccumulatorProof, string] = diff --git a/fluffy/network/state/state_network.nim b/fluffy/network/state/state_network.nim index 5211751a66..da01a73681 100644 --- a/fluffy/network/state/state_network.nim +++ b/fluffy/network/state/state_network.nim @@ -11,6 +11,7 @@ import results, chronos, chronicles, + metrics, eth/common/hashes, eth/p2p/discoveryv5/[protocol, enr], ../../database/content_db, @@ -25,6 +26,11 @@ export results, state_content, hashes logScope: topics = "portal_state" +declareCounter state_network_offers_success, + "Portal state network offers successfully validated", labels = ["protocol_id"] +declareCounter state_network_offers_failed, + "Portal state network offers which failed validation", labels = ["protocol_id"] + type StateNetwork* = ref object portalProtocol*: PortalProtocol contentQueue*: AsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])] @@ -58,6 +64,7 @@ proc new*( toContentIdHandler, createGetHandler(contentDB), createStoreHandler(contentDB, portalConfig.radiusConfig), + createContainsHandler(contentDB), createRadiusHandler(contentDB), s, bootstrapRecords, @@ -84,8 +91,7 @@ proc getContent( if maybeLocalContent.isSome(): let contentValue = V.decode(maybeLocalContent.get()).valueOr: - error "Unable to decode state local content value" - return Opt.none(V) + raiseAssert("Unable to decode state local content value") info "Fetched state local content value" return Opt.some(contentValue) @@ -100,11 +106,11 @@ proc getContent( contentValueBytes = contentLookupResult.content let contentValue = V.decode(contentValueBytes).valueOr: - warn "Unable to decode state content value from content lookup" + error "Unable to decode state content value from content lookup" continue validateRetrieval(key, contentValue).isOkOr: - warn "Validation of retrieved state content failed" + error "Validation of retrieved state content failed" continue info "Fetched valid state content from the network" @@ -178,7 +184,6 @@ proc processOffer*( n.portalProtocol.storeContent( contentKeyBytes, contentId, contentValue.toRetrievalValue().encode() ) - debug "Offered content validated successfully", contentKeyBytes await gossipOffer( n.portalProtocol, maybeSrcNodeId, contentKeyBytes, contentValueBytes @@ -218,11 +223,15 @@ proc processContentLoop(n: StateNetwork) {.async: (raises: []).} = srcNodeId, contentKeyBytes, contentBytes, contentKey.contractCodeKey, ContractCodeOffer, ) + if offerRes.isOk(): - info "Offered content processed successfully", contentKeyBytes + state_network_offers_success.inc(labelValues = [$n.portalProtocol.protocolId]) + debug "Received offered content validated successfully", + srcNodeId, contentKeyBytes else: - error "Offered content processing failed", - contentKeyBytes, error = offerRes.error() + state_network_offers_failed.inc(labelValues = [$n.portalProtocol.protocolId]) + error "Received offered content failed validation", + srcNodeId, contentKeyBytes, error = offerRes.error() except CancelledError: trace "processContentLoop canceled" diff --git a/fluffy/network/wire/portal_protocol.nim b/fluffy/network/wire/portal_protocol.nim index c26f8ced43..2fddca7304 100644 --- a/fluffy/network/wire/portal_protocol.nim +++ b/fluffy/network/wire/portal_protocol.nim @@ -47,7 +47,7 @@ declareHistogram portal_lookup_node_requests, labels = ["protocol_id"], buckets = requestBuckets declareHistogram portal_lookup_content_requests, - "Portal wire protocol amount of requests per node lookup", + "Portal wire protocol amount of requests per content lookup", labels = ["protocol_id"], buckets = requestBuckets declareCounter portal_lookup_content_failures, @@ -125,20 +125,6 @@ const ## value in milliseconds initialLookups = 1 ## Amount of lookups done when populating the routing table - # These are the concurrent offers per Portal wire protocol that is running. - # Using the `offerQueue` allows for limiting the amount of offers send and - # thus how many streams can be started. - # TODO: - # More thought needs to go into this as it is currently on a per network - # basis. Keep it simple like that? Or limit it better at the stream transport - # level? In the latter case, this might still need to be checked/blocked at - # the very start of sending the offer, because blocking/waiting too long - # between the received accept message and actually starting the stream and - # sending data could give issues due to timeouts on the other side. - # And then there are still limits to be applied also for FindContent and the - # incoming directions. - concurrentOffers = 50 - type ToContentIdHandler* = proc(contentKey: ContentKeyByteList): results.Opt[ContentId] {.raises: [], gcsafe.} @@ -151,13 +137,17 @@ type contentKey: ContentKeyByteList, contentId: ContentId, content: seq[byte] ) {.raises: [], gcsafe.} + DbContainsHandler* = proc(contentKey: ContentKeyByteList, contentId: ContentId): bool {. + raises: [], gcsafe + .} + DbRadiusHandler* = proc(): UInt256 {.raises: [], gcsafe.} PortalProtocolId* = array[2, byte] - RadiusCache* = LRUCache[NodeId, UInt256] + RadiusCache* = LruCache[NodeId, UInt256] - ContentCache = LRUCache[ContentId, seq[byte]] + ContentCache = LruCache[ContentId, seq[byte]] ContentKV* = object contentKey*: ContentKeyByteList @@ -183,6 +173,7 @@ type contentCache: ContentCache dbGet*: DbGetHandler dbPut*: DbStoreHandler + dbContains*: DbContainsHandler dataRadius*: DbRadiusHandler bootstrapRecords*: seq[Record] lastLookup: chronos.Moment @@ -319,7 +310,7 @@ func inRange( let distance = p.distance(nodeId, contentId) distance <= nodeRadius -proc inRange*(p: PortalProtocol, contentId: ContentId): bool = +template inRange*(p: PortalProtocol, contentId: ContentId): bool = p.inRange(p.localNode.id, p.dataRadius(), contentId) func truncateEnrs( @@ -474,7 +465,7 @@ proc handleOffer(p: PortalProtocol, o: OfferMessage, srcId: NodeId): seq[byte] = ) if p.inRange(contentId): - if p.dbGet(contentKey, contentId).isErr: + if not p.dbContains(contentKey, contentId): contentKeysBitList.setBit(i) discard contentKeys.add(contentKey) else: @@ -561,6 +552,7 @@ proc new*( toContentId: ToContentIdHandler, dbGet: DbGetHandler, dbPut: DbStoreHandler, + dbContains: DbContainsHandler, dbRadius: DbRadiusHandler, stream: PortalStream, bootstrapRecords: openArray[Record] = [], @@ -580,11 +572,12 @@ proc new*( ContentCache.init(if config.disableContentCache: 0 else: config.contentCacheSize), dbGet: dbGet, dbPut: dbPut, + dbContains: dbContains, dataRadius: dbRadius, bootstrapRecords: @bootstrapRecords, stream: stream, radiusCache: RadiusCache.init(256), - offerQueue: newAsyncQueue[OfferRequest](concurrentOffers), + offerQueue: newAsyncQueue[OfferRequest](config.maxConcurrentOffers), pingTimings: Table[NodeId, chronos.Moment](), config: config, ) @@ -978,9 +971,9 @@ proc offer( return ok(m.contentKeys) else: - warn "Offer failed due to accept request failure ", + debug "Offer failed due to accept request failure ", error = acceptMessageResponse.error - return err("No accept response") + return err("No or invalid accept response: " & acceptMessageResponse.error) proc offer*( p: PortalProtocol, dst: Node, contentKeys: ContentKeysList @@ -1132,14 +1125,31 @@ proc contentLookup*( p: PortalProtocol, target: ContentKeyByteList, targetId: UInt256 ): Future[Opt[ContentLookupResult]] {.async: (raises: [CancelledError]).} = ## Perform a lookup for the given target, return the closest n nodes to the - ## target. Maximum value for n is `BUCKET_SIZE`. + ## target. # `closestNodes` holds the k closest nodes to target found, sorted by distance # Unvalidated nodes are used for requests as a form of validation. var closestNodes = p.routingTable.neighbours(targetId, BUCKET_SIZE, seenOnly = false) + # Shuffling the order of the nodes in order to not always hit the same node # first for the same request. p.baseProtocol.rng[].shuffle(closestNodes) + # Sort closestNodes so that nodes that are in range of the target content + # are queried first + proc nodesCmp(x, y: Node): int = + let + xRadius = p.radiusCache.get(x.id) + yRadius = p.radiusCache.get(y.id) + + if xRadius.isSome() and p.inRange(x.id, xRadius.unsafeGet(), targetId): + -1 + elif yRadius.isSome() and p.inRange(y.id, yRadius.unsafeGet(), targetId): + 1 + else: + 0 + + closestNodes.sort(nodesCmp) + var asked, seen = HashSet[NodeId]() asked.incl(p.localNode.id) # No need to ask our own node seen.incl(p.localNode.id) # No need to discover our own node @@ -1627,7 +1637,6 @@ proc getLocalContent*( # Check first if content is in range, as this is a cheaper operation # than the database lookup. if p.inRange(contentId): - doAssert(p.dbGet != nil) p.dbGet(contentKey, contentId) else: Opt.none(seq[byte]) @@ -1735,7 +1744,19 @@ proc start*(p: PortalProtocol) = p.refreshLoop = refreshLoop(p) p.revalidateLoop = revalidateLoop(p) - for i in 0 ..< concurrentOffers: + # These are the concurrent offers per Portal wire protocol that is running. + # Using the `offerQueue` allows for limiting the amount of offers send and + # thus how many streams can be started. + # TODO: + # More thought needs to go into this as it is currently on a per network + # basis. Keep it simple like that? Or limit it better at the stream transport + # level? In the latter case, this might still need to be checked/blocked at + # the very start of sending the offer, because blocking/waiting too long + # between the received accept message and actually starting the stream and + # sending data could give issues due to timeouts on the other side. + # And then there are still limits to be applied also for FindContent and the + # incoming directions. + for i in 0 ..< p.config.maxConcurrentOffers: p.offerWorkers.add(offerWorker(p)) proc stop*(p: PortalProtocol) {.async: (raises: []).} = diff --git a/fluffy/network/wire/portal_protocol_config.nim b/fluffy/network/wire/portal_protocol_config.nim index da70a636f1..5824e8ed88 100644 --- a/fluffy/network/wire/portal_protocol_config.nim +++ b/fluffy/network/wire/portal_protocol_config.nim @@ -43,6 +43,7 @@ type maxGossipNodes*: int contentCacheSize*: int disableContentCache*: bool + maxConcurrentOffers*: int const defaultRadiusConfig* = RadiusConfig(kind: Dynamic) @@ -51,6 +52,7 @@ const defaultMaxGossipNodes* = 4 defaultContentCacheSize* = 100 defaultDisableContentCache* = false + defaultMaxConcurrentOffers* = 50 revalidationTimeout* = chronos.seconds(30) defaultPortalProtocolConfig* = PortalProtocolConfig( @@ -61,6 +63,7 @@ const maxGossipNodes: defaultMaxGossipNodes, contentCacheSize: defaultContentCacheSize, disableContentCache: defaultDisableContentCache, + maxConcurrentOffers: defaultMaxConcurrentOffers, ) proc init*( @@ -73,6 +76,7 @@ proc init*( maxGossipNodes: int, contentCacheSize: int, disableContentCache: bool, + maxConcurrentOffers: int, ): T = PortalProtocolConfig( tableIpLimits: @@ -83,6 +87,7 @@ proc init*( maxGossipNodes: maxGossipNodes, contentCacheSize: contentCacheSize, disableContentCache: disableContentCache, + maxConcurrentOffers: maxConcurrentOffers, ) func fromLogRadius*(T: type UInt256, logRadius: uint16): T = diff --git a/fluffy/portal_node.nim b/fluffy/portal_node.nim index 2d1736edf0..b9c50b5745 100644 --- a/fluffy/portal_node.nim +++ b/fluffy/portal_node.nim @@ -12,6 +12,7 @@ import chronos, eth/p2p/discoveryv5/protocol, beacon_chain/spec/forks, + stew/byteutils, ./network_metadata, ./eth_data/history_data_ssz_e2s, ./database/content_db, @@ -118,6 +119,8 @@ proc new*( beaconDb, streamManager, networkData.forks, + networkData.clock.getBeaconTimeFn(), + networkData.metadata.cfg, config.trustedBlockRoot, bootstrapRecords = bootstrapRecords, portalConfig = config.portalConfig, diff --git a/fluffy/rpc/portal_rpc_client.nim b/fluffy/rpc/portal_rpc_client.nim index a33d0836d8..55fe52d6ec 100644 --- a/fluffy/rpc/portal_rpc_client.nim +++ b/fluffy/rpc/portal_rpc_client.nim @@ -13,7 +13,7 @@ import eth/common/[headers_rlp, blocks_rlp, receipts_rlp], json_rpc/rpcclient, ../common/common_types, - ../network/history/[history_content, history_network], + ../network/history/[history_content, history_type_conversions, history_validation], ./rpc_calls/[rpc_discovery_calls, rpc_portal_calls, rpc_portal_debug_calls] export rpcclient, rpc_discovery_calls, rpc_portal_calls, rpc_portal_debug_calls, results @@ -48,15 +48,6 @@ func toPortalRpcError(e: ref CatchableError): PortalRpcError = else: raiseAssert(e.msg) -proc portal_historyLocalContent( - client: PortalRpcClient, contentKey: string -): Future[Result[string, PortalRpcError]] {.async: (raises: []).} = - try: - let content = await RpcClient(client).portal_historyLocalContent(contentKey) - ok(content) - except CatchableError as e: - err(e.toPortalRpcError()) - proc portal_historyGetContent( client: PortalRpcClient, contentKey: string ): Future[Result[string, PortalRpcError]] {.async: (raises: []).} = @@ -78,17 +69,6 @@ template valueOrErr[T](res: Result[T, string], error: PortalRpcError): auto = else: err(error) -proc historyGetContent( - client: PortalRpcClient, contentKey: string -): Future[Result[string, PortalRpcError]] {.async: (raises: []).} = - # Look up the content from the local db before trying to get it from the network - let content = (await client.portal_historyLocalContent(contentKey)).valueOr: - if error == ContentNotFound: - ?await client.portal_historyGetContent(contentKey) - else: - return err(error) - ok(content) - proc historyGetBlockHeader*( client: PortalRpcClient, blockHash: Hash32, validateContent = true ): Future[Result[Header, PortalRpcError]] {.async: (raises: []).} = @@ -104,13 +84,13 @@ proc historyGetBlockHeader*( let contentKey = blockHeaderContentKey(blockHash).encode().asSeq().to0xHex() - content = ?await client.historyGetContent(contentKey) + content = ?await client.portal_historyGetContent(contentKey) headerWithProof = decodeSsz(content.toBytes(), BlockHeaderWithProof).valueOr: return err(InvalidContentValue) headerBytes = headerWithProof.header.asSeq() if validateContent: - validateBlockHeaderBytes(headerBytes, blockHash).valueOrErr(ContentValidationFailed) + validateHeaderBytes(headerBytes, blockHash).valueOrErr(ContentValidationFailed) else: decodeRlp(headerBytes, Header).valueOrErr(InvalidContentValue) @@ -124,7 +104,7 @@ proc historyGetBlockBody*( let contentKey = blockBodyContentKey(blockHash).encode().asSeq().to0xHex() - content = ?await client.historyGetContent(contentKey) + content = ?await client.portal_historyGetContent(contentKey) if validateContent: let blockHeader = ?await client.historyGetBlockHeader(blockHash) @@ -132,7 +112,7 @@ proc historyGetBlockBody*( ContentValidationFailed ) else: - decodeBlockBodyBytes(content.toBytes()).valueOrErr(InvalidContentValue) + fromPortalBlockBodyBytes(content.toBytes()).valueOrErr(InvalidContentValue) proc historyGetReceipts*( client: PortalRpcClient, blockHash: Hash32, validateContent = true @@ -144,7 +124,7 @@ proc historyGetReceipts*( let contentKey = receiptsContentKey(blockHash).encode().asSeq().to0xHex() - content = ?await client.historyGetContent(contentKey) + content = ?await client.portal_historyGetContent(contentKey) if validateContent: let blockHeader = ?await client.historyGetBlockHeader(blockHash) diff --git a/fluffy/rpc/rpc_portal_history_api.nim b/fluffy/rpc/rpc_portal_history_api.nim index b74f08bed6..146db16cc4 100644 --- a/fluffy/rpc/rpc_portal_history_api.nim +++ b/fluffy/rpc/rpc_portal_history_api.nim @@ -81,9 +81,13 @@ proc installPortalHistoryApiHandlers*(rpcServer: RpcServer, p: PortalProtocol) = key = ContentKeyByteList.init(hexToSeqByte(contentKey)) contentId = p.toContentId(key).valueOr: raise invalidKeyErr() + maybeContent = p.getLocalContent(key, contentId) - contentResult = (await p.contentLookup(key, contentId)).valueOr: - raise contentNotFoundErr() + if maybeContent.isSome(): + return ContentInfo(content: maybeContent.get().to0xHex(), utpTransfer: false) + + let contentResult = (await p.contentLookup(key, contentId)).valueOr: + raise contentNotFoundErr() return ContentInfo( content: contentResult.content.to0xHex(), utpTransfer: contentResult.utpTransfer @@ -96,8 +100,12 @@ proc installPortalHistoryApiHandlers*(rpcServer: RpcServer, p: PortalProtocol) = key = ContentKeyByteList.init(hexToSeqByte(contentKey)) contentId = p.toContentId(key).valueOr: raise invalidKeyErr() + maybeContent = p.getLocalContent(key, contentId) + + if maybeContent.isSome(): + return TraceContentLookupResult(content: maybeContent, utpTransfer: false) - res = await p.traceContentLookup(key, contentId) + let res = await p.traceContentLookup(key, contentId) # TODO: Might want to restructure the lookup result here. Potentially doing # the json conversion in this module. diff --git a/fluffy/rpc/rpc_portal_nimbus_beacon_api.nim b/fluffy/rpc/rpc_portal_nimbus_beacon_api.nim new file mode 100644 index 0000000000..c0caa19a34 --- /dev/null +++ b/fluffy/rpc/rpc_portal_nimbus_beacon_api.nim @@ -0,0 +1,19 @@ +# nimbus +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [].} + +import json_rpc/rpcserver, ../network/beacon/beacon_network + +export rpcserver + +# Nimbus/fluffy specific RPC methods for the Portal beacon network. +proc installPortalNimbusBeaconApiHandlers*(rpcServer: RpcServer, n: BeaconNetwork) = + rpcServer.rpc("portal_nimbus_beaconSetTrustedBlockRoot") do(blockRoot: string) -> bool: + let root = Digest.fromHex(blockRoot) + n.trustedBlockRoot = Opt.some(root) + true diff --git a/fluffy/scripts/test_portal_testnet.nim b/fluffy/scripts/test_portal_testnet.nim index d9401d7078..c7ec1fe94f 100644 --- a/fluffy/scripts/test_portal_testnet.nim +++ b/fluffy/scripts/test_portal_testnet.nim @@ -14,7 +14,7 @@ import chronos, stew/byteutils, eth/p2p/discoveryv5/random2, - eth/keys, + eth/common/keys, ../common/common_types, ../rpc/portal_rpc_client, ../rpc/eth_rpc_client, @@ -79,7 +79,8 @@ proc withRetries[A]( if tries > numRetries: # if we reached max number of retries fail let msg = - "Call failed with msg: " & exc.msg & ", for node with idx: " & $nodeIdx + "Call failed with msg: " & exc.msg & ", for node with idx: " & $nodeIdx & + ", after " & $tries & " tries." raise newException(ValueError, msg) inc tries @@ -94,7 +95,7 @@ proc retryUntil[A]( f: FutureCallback[A], c: CheckCallback[A], checkFailMessage: string, nodeIdx: int ): Future[A] = # some reasonable limits, which will cause waits as: 1, 2, 4, 8, 16, 32 seconds - return withRetries(f, c, 1, seconds(1), checkFailMessage, nodeIdx) + return withRetries(f, c, 3, seconds(1), checkFailMessage, nodeIdx) # Note: # When doing json-rpc requests following `RpcPostError` can occur: @@ -261,9 +262,20 @@ procSuite "Portal testnet tests": # Gossiping all block headers with proof first, as bodies and receipts # require them for validation. - for (content, contentKey) in blockHeadersWithProof: - discard - (await clients[0].portal_historyGossip(content.toHex(), contentKey.toHex())) + for (contentKey, contentValue) in blockHeadersWithProof: + discard ( + await clients[0].portal_historyGossip(contentKey.toHex(), contentValue.toHex()) + ) + + # TODO: Fix iteration order: Because the blockData gets parsed into a + # BlockDataTable, iterating over this result in gossiping the block bodies + # and receipts of block in a different order than the headers. + # Because of this, block bodies and receipts for block + # 0x6251d65b8a8668efabe2f89c96a5b6332d83b3bbe585089ea6b2ab9b6754f5e9 + # come right after the headers with proof. This is likely to cause validation + # failures on the nodes, as the block bodies and receipts require the header + # to get validated. + await sleepAsync(seconds(1)) # Gossiping all block bodies and receipts. for b in blocks(blockData, false): diff --git a/fluffy/tests/beacon_network_tests/beacon_test_helpers.nim b/fluffy/tests/beacon_network_tests/beacon_test_helpers.nim index 85eb3a351b..e7e4e11cc7 100644 --- a/fluffy/tests/beacon_network_tests/beacon_test_helpers.nim +++ b/fluffy/tests/beacon_network_tests/beacon_test_helpers.nim @@ -28,7 +28,14 @@ proc newLCNode*( db = BeaconDb.new(networkData, "", inMemory = true) streamManager = StreamManager.new(node) network = BeaconNetwork.new( - PortalNetwork.none, node, db, streamManager, networkData.forks, trustedBlockRoot + PortalNetwork.none, + node, + db, + streamManager, + networkData.forks, + networkData.clock.getBeaconTimeFn(), + networkData.metadata.cfg, + trustedBlockRoot, ) return BeaconNode(discoveryProtocol: node, beaconNetwork: network) diff --git a/fluffy/tests/history_network_tests/test_history_content.nim b/fluffy/tests/history_network_tests/test_history_content.nim index d9af9e576c..bdd99c87ad 100644 --- a/fluffy/tests/history_network_tests/test_history_content.nim +++ b/fluffy/tests/history_network_tests/test_history_content.nim @@ -15,8 +15,7 @@ import eth/common/headers_rlp, ../../network_metadata, ../../eth_data/[history_data_json_store, history_data_ssz_e2s], - ../../network/history/ - [history_content, history_network, validation/historical_hashes_accumulator], + ../../network/history/[history_content, history_type_conversions, history_validation], ../../eth_data/yaml_utils, ./test_history_util @@ -73,7 +72,7 @@ suite "History Content Values": check res.isOk() let header = res.get() - check accumulator.verifyHeader(header, blockHeaderWithProof.proof).isOk() + check accumulator.verifyBlockHeaderProof(header, blockHeaderWithProof.proof).isOk() # Encode content check: @@ -108,7 +107,9 @@ suite "History Content Values": check res.isOk() let header = res.get() - check accumulator.verifyHeader(header, blockHeaderWithProof.proof).isOk() + check accumulator + .verifyBlockHeaderProof(header, blockHeaderWithProof.proof) + .isOk() # Encode content check: @@ -172,7 +173,7 @@ suite "History Content Values": check contentKey.isOk() # Decode (SSZ + RLP decode step) and validate block body - let contentValue = decodeBlockBodyBytes(contentValueEncoded) + let contentValue = fromPortalBlockBodyBytes(contentValueEncoded) check contentValue.isOk() # Encode content and content key diff --git a/fluffy/tests/history_network_tests/test_history_content_validation.nim b/fluffy/tests/history_network_tests/test_history_content_validation.nim index 04de1c6b65..bd9f896535 100644 --- a/fluffy/tests/history_network_tests/test_history_content_validation.nim +++ b/fluffy/tests/history_network_tests/test_history_content_validation.nim @@ -17,7 +17,7 @@ import eth/common/headers_rlp, ../../common/common_types, ../../eth_data/history_data_json_store, - ../../network/history/history_network + ../../network/history/[history_type_conversions, history_validation] const dataFile = "./fluffy/tests/blocks/mainnet_blocks_selected.json" @@ -51,12 +51,12 @@ suite "History Content Values Validation": ) test "Valid Header": - check validateBlockHeaderBytes(blockHeaderBytes, blockHash).isOk() + check validateHeaderBytes(blockHeaderBytes, blockHash).isOk() test "Malformed Header": let malformedBytes = blockHeaderBytes[10 .. blockHeaderBytes.high] - check validateBlockHeaderBytes(malformedBytes, blockHash).isErr() + check validateHeaderBytes(malformedBytes, blockHash).isErr() test "Invalid Header - Different gasUsed": var modifiedHeader = blockHeader @@ -65,7 +65,7 @@ suite "History Content Values Validation": let modifiedHeaderBytes = rlp.encode(modifiedHeader) - check validateBlockHeaderBytes(modifiedHeaderBytes, blockHash).isErr() + check validateHeaderBytes(modifiedHeaderBytes, blockHash).isErr() test "Valid Block Body": check validateBlockBodyBytes(blockBodyBytes, blockHeader).isOk() diff --git a/fluffy/tests/history_network_tests/test_history_network.nim b/fluffy/tests/history_network_tests/test_history_network.nim index d942852372..4df06495fa 100644 --- a/fluffy/tests/history_network_tests/test_history_network.nim +++ b/fluffy/tests/history_network_tests/test_history_network.nim @@ -54,7 +54,19 @@ proc stop(hn: HistoryNode) {.async.} = await hn.discoveryProtocol.closeWait() proc containsId(hn: HistoryNode, contentId: ContentId): bool = - return hn.historyNetwork.contentDB.get(contentId).isSome() + hn.historyNetwork.contentDB.contains(contentId) + +proc checkContainsIdWithRetry( + historyNode: HistoryNode, id: ContentId +) {.async: (raises: [CancelledError]).} = + var res = false + for i in 0 .. 50: + res = historyNode.containsId(id) + if res: + break + await sleepAsync(10.milliseconds) + + check res proc createEmptyHeaders(fromNum: int, toNum: int): seq[Header] = var headers: seq[Header] @@ -216,17 +228,10 @@ procSuite "History Content Network": while not historyNode2.historyNetwork.contentQueue.empty(): await sleepAsync(1.milliseconds) - # Note: It seems something changed in chronos, causing different behavior. - # Seems that validateContent called through processContentLoop used to - # run immediatly in case of a "non async shortpath". This is no longer the - # case and causes the content not yet to be validated and thus stored at - # this step. Add an await here so that the store can happen. - await sleepAsync(100.milliseconds) - for i, contentKV in contentKVs: let id = toContentId(contentKV.contentKey) if i < len(contentKVs) - 1: - check historyNode2.containsId(id) == true + await historyNode2.checkContainsIdWithRetry(id) else: check historyNode2.containsId(id) == false @@ -283,11 +288,9 @@ procSuite "History Content Network": while not historyNode2.historyNetwork.contentQueue.empty(): await sleepAsync(1.milliseconds) - await sleepAsync(100.milliseconds) - for contentKV in contentKVs: let id = toContentId(contentKV.contentKey) - check historyNode2.containsId(id) == true + await historyNode2.checkContainsIdWithRetry(id) await historyNode1.stop() await historyNode2.stop() diff --git a/fluffy/tests/rpc_tests/test_portal_rpc_client.nim b/fluffy/tests/rpc_tests/test_portal_rpc_client.nim index ea4c5187c8..bb8b9a6b96 100644 --- a/fluffy/tests/rpc_tests/test_portal_rpc_client.nim +++ b/fluffy/tests/rpc_tests/test_portal_rpc_client.nim @@ -19,7 +19,7 @@ import eth/p2p/discoveryv5/protocol as discv5_protocol, ../../network/wire/[portal_protocol, portal_stream, portal_protocol_config], ../../network/history/ - [history_network, history_content, validation/historical_hashes_accumulator], + [history_network, history_content, history_type_conversions, history_validation], ../../database/content_db, ../../rpc/[portal_rpc_client, rpc_portal_history_api], ../test_helpers @@ -57,7 +57,7 @@ proc stop(hn: HistoryNode) {.async.} = await hn.discoveryProtocol.closeWait() proc containsId(hn: HistoryNode, contentId: ContentId): bool = - return hn.historyNetwork.contentDB.get(contentId).isSome() + return hn.historyNetwork.contentDB.contains(contentId) proc store*(hn: HistoryNode, blockHash: Hash32, blockHeader: Header) = let diff --git a/fluffy/tests/state_network_tests/state_test_helpers.nim b/fluffy/tests/state_network_tests/state_test_helpers.nim index acca9181f1..01d76faef9 100644 --- a/fluffy/tests/state_network_tests/state_test_helpers.nim +++ b/fluffy/tests/state_network_tests/state_test_helpers.nim @@ -16,7 +16,7 @@ import eth/p2p/discoveryv5/routing_table, ../../network/wire/[portal_protocol, portal_stream, portal_protocol_config], ../../../nimbus/common/chain_config, - ../../network/history/[history_content, history_network], + ../../network/history/[history_content, history_network, history_validation], ../../network/state/[state_content, state_utils, state_network], ../../eth_data/yaml_utils, ../../database/content_db, @@ -142,11 +142,10 @@ proc stop*(sn: StateNode) {.async.} = await sn.discoveryProtocol.closeWait() proc containsId*(sn: StateNode, contentId: ContentId): bool {.inline.} = - return sn.stateNetwork.portalProtocol - # The contentKey parameter isn't used but is required for compatibility - # with the dbGet handler inside getLocalContent. - .getLocalContent(ContentKeyByteList.init(@[]), contentId) - .isSome() + # The contentKey parameter isn't used but is required for compatibility with + # the dbContains handler + return + sn.stateNetwork.portalProtocol.dbContains(ContentKeyByteList.init(@[]), contentId) proc mockStateRootLookup*( sn: StateNode, blockNumOrHash: uint64 | Hash32, stateRoot: Hash32 diff --git a/fluffy/tests/test_content_db.nim b/fluffy/tests/test_content_db.nim index d1093c7328..fac0ac0075 100644 --- a/fluffy/tests/test_content_db.nim +++ b/fluffy/tests/test_content_db.nim @@ -26,26 +26,37 @@ suite "Content Database": key = ContentId(UInt256.high()) # Some key block: - let val = db.get(key) + var val = Opt.none(seq[byte]) + proc onData(data: openArray[byte]) = + val = Opt.some(@data) check: + db.get(key, onData) == false val.isNone() db.contains(key) == false block: discard db.putAndPrune(key, [byte 0, 1, 2, 3]) - let val = db.get(key) + + var val = Opt.none(seq[byte]) + proc onData(data: openArray[byte]) = + val = Opt.some(@data) check: + db.get(key, onData) == true val.isSome() val.get() == [byte 0, 1, 2, 3] db.contains(key) == true block: db.del(key) - let val = db.get(key) + + var val = Opt.none(seq[byte]) + proc onData(data: openArray[byte]) = + val = Opt.some(@data) check: + db.get(key, onData) == false val.isNone() db.contains(key) == false @@ -137,9 +148,9 @@ suite "Content Database": # With the current settings the 2 furthest elements will be deleted, # i.e key 30 and 40. The furthest non deleted one will have key 20. pr10.distanceOfFurthestElement == thirdFurthest - db.get(furthestElement).isNone() - db.get(secondFurthest).isNone() - db.get(thirdFurthest).isSome() + not db.contains(furthestElement) + not db.contains(secondFurthest) + db.contains(thirdFurthest) test "ContentDB force pruning": const diff --git a/fluffy/tests/wire_protocol_tests/test_portal_wire_protocol.nim b/fluffy/tests/wire_protocol_tests/test_portal_wire_protocol.nim index 11d7b3c72e..a18aeecf96 100644 --- a/fluffy/tests/wire_protocol_tests/test_portal_wire_protocol.nim +++ b/fluffy/tests/wire_protocol_tests/test_portal_wire_protocol.nim @@ -50,6 +50,7 @@ proc initPortalProtocol( toContentId, createGetHandler(db), createStoreHandler(db, defaultRadiusConfig), + createContainsHandler(db), createRadiusHandler(db), stream, bootstrapRecords = bootstrapRecords, @@ -346,6 +347,7 @@ procSuite "Portal Wire Protocol Tests": toContentId, createGetHandler(db), createStoreHandler(db, defaultRadiusConfig), + createContainsHandler(db), createRadiusHandler(db), stream, ) @@ -364,10 +366,10 @@ procSuite "Portal Wire Protocol Tests": # Index 2 should be still be in database and its distance should be <= # updated radius check: - db.get((distances[0] xor proto1.localNode.id)).isNone() - db.get((distances[1] xor proto1.localNode.id)).isNone() - db.get((distances[2] xor proto1.localNode.id)).isNone() - db.get((distances[3] xor proto1.localNode.id)).isSome() + not db.contains((distances[0] xor proto1.localNode.id)) + not db.contains((distances[1] xor proto1.localNode.id)) + not db.contains((distances[2] xor proto1.localNode.id)) + db.contains((distances[3] xor proto1.localNode.id)) # The radius has been updated and is lower than the maximum start value. proto1.dataRadius() < UInt256.high # Yet higher than or equal to the furthest non deleted element. diff --git a/fluffy/tools/beacon_lc_bridge/beacon_lc_bridge.nim b/fluffy/tools/beacon_lc_bridge/beacon_lc_bridge.nim index 1a8ada2ba0..938880739f 100644 --- a/fluffy/tools/beacon_lc_bridge/beacon_lc_bridge.nim +++ b/fluffy/tools/beacon_lc_bridge/beacon_lc_bridge.nim @@ -62,17 +62,17 @@ import from beacon_chain/gossip_processing/block_processor import newExecutionPayload from beacon_chain/gossip_processing/eth2_processor import toValidationResult -template append(w: var RlpWriter, t: TypedTransaction) = - w.appendRawBytes(distinctBase t) +template append(w: var RlpWriter, typedTransaction: TypedTransaction) = + w.appendRawBytes(distinctBase typedTransaction) -template append(w: var RlpWriter, t: WithdrawalV1) = +template append(w: var RlpWriter, withdrawalV1: WithdrawalV1) = # TODO: Since Capella we can also access ExecutionPayloadHeader and thus # could get the Roots through there instead. w.append blocks.Withdrawal( - index: distinctBase(t.index), - validatorIndex: distinctBase(t.validatorIndex), - address: t.address, - amount: distinctBase(t.amount), + index: distinctBase(withdrawalV1.index), + validatorIndex: distinctBase(withdrawalV1.validatorIndex), + address: withdrawalV1.address, + amount: distinctBase(withdrawalV1.amount), ) proc asPortalBlockData*( diff --git a/fluffy/tools/fcli_db.nim b/fluffy/tools/fcli_db.nim index 70a2318597..93c99f6a83 100644 --- a/fluffy/tools/fcli_db.nim +++ b/fluffy/tools/fcli_db.nim @@ -108,7 +108,11 @@ proc cmdBench(conf: DbConf) = for key in keys: withTimer(timers[tDbGet]): - let _ = db.get(key) + var val = Opt.none(seq[byte]) + proc onData(data: openArray[byte]) = + val = Opt.some(@data) + + let _ = db.get(key, onData) for key in keys: withTimer(timers[tDbContains]): diff --git a/fluffy/tools/portal_bridge/portal_bridge_conf.nim b/fluffy/tools/portal_bridge/portal_bridge_conf.nim index 935ac32abf..5ea4281d45 100644 --- a/fluffy/tools/portal_bridge/portal_bridge_conf.nim +++ b/fluffy/tools/portal_bridge/portal_bridge_conf.nim @@ -12,6 +12,8 @@ import confutils, confutils/std/net, nimcrypto/hash, + ../../network_metadata, + ../../eth_data/era1, ../../[conf, logging] export net @@ -36,6 +38,8 @@ proc defaultPortalBridgeStateDir*(): string = else: defaultDataDir() / "bridge" / "state" +const defaultEndEra* = uint64(era(network_metadata.mergeBlockNumber - 1)) + type TrustedDigest* = MDigest[32 * 8] @@ -117,6 +121,13 @@ type name: "backfill" .}: bool + startEra* {.desc: "The era to start from", defaultValue: 0, name: "start-era".}: + uint64 + + endEra* {. + desc: "The era to stop at", defaultValue: defaultEndEra, name: "end-era" + .}: uint64 + audit* {. desc: "Run pre-merge backfill in audit mode, which will only gossip content that if failed to fetch from the network", @@ -130,6 +141,13 @@ type defaultValueDesc: defaultEra1DataDir(), name: "era1-dir" .}: InputDir + + gossipConcurrency* {. + desc: + "The number of concurrent gossip workers for gossiping content into the portal network", + defaultValue: 50, + name: "gossip-concurrency" + .}: int of PortalBridgeCmd.state: web3UrlState* {.desc: "Execution layer JSON-RPC API URL", name: "web3-url".}: JsonRpcUrl @@ -173,6 +191,13 @@ type name: "verify-gossip" .}: bool + skipGossipForExisting* {. + desc: + "Enable skipping gossip of each content value which is successfully fetched from the network", + defaultValue: true, + name: "skip-gossip-for-existing" + .}: bool + gossipWorkersCount* {. desc: "The number of workers to use for gossiping the state into the portal network", diff --git a/fluffy/tools/portal_bridge/portal_bridge_history.nim b/fluffy/tools/portal_bridge/portal_bridge_history.nim index f3455996ae..3d1f16fa99 100644 --- a/fluffy/tools/portal_bridge/portal_bridge_history.nim +++ b/fluffy/tools/portal_bridge/portal_bridge_history.nim @@ -15,12 +15,12 @@ import results, stew/byteutils, eth/common/keys, - eth/common/[base, headers_rlp, blocks_rlp], + eth/common/[base, headers_rlp, blocks_rlp, receipts], eth/p2p/discoveryv5/random2, ../../../nimbus/beacon/web3_eth_conv, ../../../hive_integration/nodocker/engine/engine_client, ../../rpc/portal_rpc_client, - ../../network/history/[history_content, history_network], + ../../network/history/[history_content, history_type_conversions, history_validation], ../../network_metadata, ../../eth_data/[era1, history_data_ssz_e2s, history_data_seeding], ../../database/era1_db, @@ -31,6 +31,11 @@ from eth/common/eth_types_rlp import rlpHash const newHeadPollInterval = 6.seconds # Slot with potential block is every 12s +type PortalHistoryBridge = ref object + portalClient: RpcClient + web3Client: RpcClient + gossipQueue: AsyncQueue[(seq[byte], seq[byte])] + ## Conversion functions for Block and Receipts func asEthBlock(blockObject: BlockObject): EthBlock = @@ -79,9 +84,9 @@ func asReceipt(receiptObject: ReceiptObject): Result[Receipt, string] = var logs: seq[Log] if receiptObject.logs.len > 0: for log in receiptObject.logs: - var topics: seq[Topic] + var topics: seq[receipts.Topic] for topic in log.topics: - topics.add(Topic(topic)) + topics.add(topic) logs.add(Log(address: log.address, data: log.data, topics: topics)) @@ -139,63 +144,34 @@ proc getBlockReceipts( ## Portal JSON-RPC API helper calls for pushing block and receipts proc gossipBlockHeader( - client: RpcClient, id: Hash32 | uint64, headerWithProof: BlockHeaderWithProof -): Future[Result[void, string]] {.async: (raises: []).} = - let - contentKey = blockHeaderContentKey(id) - encodedContentKeyHex = contentKey.encode.asSeq().toHex() + bridge: PortalHistoryBridge, + id: Hash32 | uint64, + headerWithProof: BlockHeaderWithProof, +): Future[void] {.async: (raises: [CancelledError]).} = + let contentKey = blockHeaderContentKey(id) - peers = - try: - await client.portal_historyGossip( - encodedContentKeyHex, SSZ.encode(headerWithProof).toHex() - ) - except CatchableError as e: - return err("JSON-RPC portal_historyGossip failed: " & $e.msg) - - info "Block header gossiped", peers, contentKey = encodedContentKeyHex - return ok() + await bridge.gossipQueue.addLast( + (contentKey.encode.asSeq(), SSZ.encode(headerWithProof)) + ) proc gossipBlockBody( - client: RpcClient, + bridge: PortalHistoryBridge, hash: Hash32, body: PortalBlockBodyLegacy | PortalBlockBodyShanghai, -): Future[Result[void, string]] {.async: (raises: []).} = - let - contentKey = blockBodyContentKey(hash) - encodedContentKeyHex = contentKey.encode.asSeq().toHex() +): Future[void] {.async: (raises: [CancelledError]).} = + let contentKey = blockBodyContentKey(hash) - peers = - try: - await client.portal_historyGossip( - encodedContentKeyHex, SSZ.encode(body).toHex() - ) - except CatchableError as e: - return err("JSON-RPC portal_historyGossip failed: " & $e.msg) - - info "Block body gossiped", peers, contentKey = encodedContentKeyHex - return ok() + await bridge.gossipQueue.addLast((contentKey.encode.asSeq(), SSZ.encode(body))) proc gossipReceipts( - client: RpcClient, hash: Hash32, receipts: PortalReceipts -): Future[Result[void, string]] {.async: (raises: []).} = - let - contentKey = receiptsContentKey(hash) - encodedContentKeyHex = contentKey.encode.asSeq().toHex() - - peers = - try: - await client.portal_historyGossip( - encodedContentKeyHex, SSZ.encode(receipts).toHex() - ) - except CatchableError as e: - return err("JSON-RPC portal_historyGossip failed: " & $e.msg) + bridge: PortalHistoryBridge, hash: Hash32, receipts: PortalReceipts +): Future[void] {.async: (raises: [CancelledError]).} = + let contentKey = receiptsContentKey(hash) - info "Receipts gossiped", peers, contentKey = encodedContentKeyHex - return ok() + await bridge.gossipQueue.addLast((contentKey.encode.asSeq(), SSZ.encode(receipts))) proc runLatestLoop( - portalClient: RpcClient, web3Client: RpcClient, validate = false + bridge: PortalHistoryBridge, validate = false ) {.async: (raises: [CancelledError]).} = ## Loop that requests the latest block + receipts and pushes them into the ## Portal network. @@ -211,14 +187,14 @@ proc runLatestLoop( var lastBlockNumber = 0'u64 while true: let t0 = Moment.now() - let blockObject = (await getBlockByNumber(web3Client, blockId)).valueOr: + let blockObject = (await bridge.web3Client.getBlockByNumber(blockId)).valueOr: error "Failed to get latest block", error await sleepAsync(1.seconds) continue let blockNumber = distinctBase(blockObject.number) if blockNumber > lastBlockNumber: - let receiptObjects = (await web3Client.getBlockReceipts(blockNumber)).valueOr: + let receiptObjects = (await bridge.web3Client.getBlockReceipts(blockNumber)).valueOr: error "Failed to get latest receipts", error await sleepAsync(1.seconds) continue @@ -239,7 +215,7 @@ proc runLatestLoop( let hash = blockObject.hash if validate: - if validateBlockHeaderBytes(headerWithProof.header.asSeq(), hash).isErr(): + if validateHeaderBytes(headerWithProof.header.asSeq(), hash).isErr(): error "Block header is invalid" continue if validateBlockBody(body, ethBlock.header).isErr(): @@ -250,11 +226,9 @@ proc runLatestLoop( continue # gossip block header by hash - (await portalClient.gossipBlockHeader(hash, headerWithProof)).isOkOr: - error "Failed to gossip block header", error, hash + await bridge.gossipBlockHeader(hash, headerWithProof) # gossip block header by number - (await portalClient.gossipBlockHeader(blockNumber, headerWithProof)).isOkOr: - error "Failed to gossip block header", error, hash + await bridge.gossipBlockHeader(blockNumber, headerWithProof) # For bodies & receipts to get verified, the header needs to be available # on the network. Wait a little to get the headers propagated through @@ -262,12 +236,9 @@ proc runLatestLoop( await sleepAsync(2.seconds) # gossip block body - (await portalClient.gossipBlockBody(hash, body)).isOkOr: - error "Failed to gossip block body", error, hash - + await bridge.gossipBlockBody(hash, body) # gossip receipts - (await portalClient.gossipReceipts(hash, portalReceipts)).isOkOr: - error "Failed to gossip receipts", error, hash + await bridge.gossipReceipts(hash, portalReceipts) # Making sure here that we poll enough times not to miss a block. # We could also do some work without awaiting it, e.g. the gossiping or @@ -281,11 +252,11 @@ proc runLatestLoop( warn "Block gossip took longer than slot interval" proc gossipHeadersWithProof( - portalClient: RpcClient, + bridge: PortalHistoryBridge, era1File: string, epochRecordFile: Opt[string] = Opt.none(string), verifyEra = false, -): Future[Result[void, string]] {.async: (raises: []).} = +): Future[Result[void, string]] {.async: (raises: [CancelledError]).} = let f = ?Era1File.open(era1File) if verifyEra: @@ -296,54 +267,59 @@ proc gossipHeadersWithProof( # UX hassle it adds to provide the accumulator ssz files. let epochRecord = if epochRecordFile.isNone: + info "Building accumulator from era1 file", era1File ?f.buildAccumulator() else: ?readEpochRecordCached(epochRecordFile.get()) - for (contentKey, contentValue) in f.headersWithProof(epochRecord): - let peers = - try: - await portalClient.portal_historyGossip( - contentKey.asSeq.toHex(), contentValue.toHex() - ) - except CatchableError as e: - return err("JSON-RPC portal_historyGossip failed: " & $e.msg) - info "Block header gossiped", peers, contentKey + info "Gossip headers from era1 file", era1File + + for blockHeader in f.era1BlockHeaders: + doAssert blockHeader.isPreMerge() + + let + headerWithProof = buildHeaderWithProof(blockHeader, epochRecord).valueOr: + raiseAssert "Failed to build header with proof: " & $blockHeader.number + blockHash = blockHeader.rlpHash() + # gossip block header by hash + await bridge.gossipBlockHeader(blockHash, headerWithProof) + # gossip block header by number + await bridge.gossipBlockHeader(blockHeader.number, headerWithProof) + + info "Succesfully put headers from era1 file in gossip queue", era1File ok() proc gossipBlockContent( - portalClient: RpcClient, era1File: string, verifyEra = false -): Future[Result[void, string]] {.async: (raises: []).} = + bridge: PortalHistoryBridge, era1File: string, verifyEra = false +): Future[Result[void, string]] {.async: (raises: [CancelledError]).} = let f = ?Era1File.open(era1File) if verifyEra: let _ = ?f.verify() - for (contentKey, contentValue) in f.blockContent(): - let peers = - try: - await portalClient.portal_historyGossip( - contentKey.asSeq.toHex(), contentValue.toHex() - ) - except CatchableError as e: - return err("JSON-RPC portal_historyGossip failed: " & $e.msg) - info "Block content gossiped", peers, contentKey + info "Gossip bodies and receipts from era1 file", era1File + + for (header, body, receipts, _) in f.era1BlockTuples: + let blockHash = header.rlpHash() + + # gossip block body + await bridge.gossipBlockBody(blockHash, PortalBlockBodyLegacy.fromBlockBody(body)) + # gossip receipts + await bridge.gossipReceipts(blockHash, PortalReceipts.fromReceipts(receipts)) + info "Succesfully put bodies and receipts from era1 file in gossip queue", era1File ok() proc runBackfillLoop( - portalClient: RpcClient, web3Client: RpcClient, era1Dir: string + bridge: PortalHistoryBridge, era1Dir: string, startEra: uint64, endEra: uint64 ) {.async: (raises: [CancelledError]).} = - let - rng = newRng() - accumulator = loadAccumulator() - while true: + let accumulator = loadAccumulator() + + for era in startEra .. endEra: let - # Grab a random era1 to backfill - era = rng[].rand(int(era(network_metadata.mergeBlockNumber - 1))) root = accumulator.historicalEpochs[era] - eraFile = era1Dir / era1FileName("mainnet", Era1(era), Digest(data: root)) + era1File = era1Dir / era1FileName("mainnet", Era1(era), Digest(data: root)) # Note: # There are two design options here: @@ -360,42 +336,38 @@ proc runBackfillLoop( # new era1 can be gossiped (might need another custom json-rpc that checks # the offer queue) when false: - info "Gossip headers from era1 file", eraFile + info "Gossip headers from era1 file", era1File let headerRes = try: - await portalClient.portal_debug_historyGossipHeaders(eraFile) + await bridge.portalClient.portal_debug_historyGossipHeaders(era1File) except CatchableError as e: error "JSON-RPC portal_debug_historyGossipHeaders failed", error = e.msg false if headerRes: - info "Gossip block content from era1 file", eraFile + info "Gossip block content from era1 file", era1File let res = try: - await portalClient.portal_debug_historyGossipBlockContent(eraFile) + await bridge.portalClient.portal_debug_historyGossipBlockContent(era1File) except CatchableError as e: error "JSON-RPC portal_debug_historyGossipBlockContent failed", error = e.msg false if res: - error "Failed to gossip block content from era1 file", eraFile + error "Failed to gossip block content from era1 file", era1File else: - error "Failed to gossip headers from era1 file", eraFile + error "Failed to gossip headers from era1 file", era1File else: - info "Gossip headers from era1 file", eraFile - (await portalClient.gossipHeadersWithProof(eraFile)).isOkOr: - error "Failed to gossip headers from era1 file", error, eraFile + (await bridge.gossipHeadersWithProof(era1File)).isOkOr: + error "Failed to gossip headers from era1 file", error, era1File continue - info "Gossip block content from era1 file", eraFile - (await portalClient.gossipBlockContent(eraFile)).isOkOr: - error "Failed to gossip block content from era1 file", error, eraFile + (await bridge.gossipBlockContent(era1File)).isOkOr: + error "Failed to gossip block content from era1 file", error, era1File continue - info "Succesfully gossiped era1 file", eraFile - proc runBackfillLoopAuditMode( - portalClient: RpcClient, web3Client: RpcClient, era1Dir: string + bridge: PortalHistoryBridge, era1Dir: string ) {.async: (raises: [CancelledError]).} = let rng = newRng() @@ -422,7 +394,7 @@ proc runBackfillLoopAuditMode( contentHex = try: ( - await portalClient.portal_historyGetContent( + await bridge.portalClient.portal_historyGetContent( contentKey.encode.asSeq().toHex() ) ).content @@ -454,7 +426,7 @@ proc runBackfillLoopAuditMode( contentHex = try: ( - await portalClient.portal_historyGetContent( + await bridge.portalClient.portal_historyGetContent( contentKey.encode.asSeq().toHex() ) ).content @@ -482,7 +454,7 @@ proc runBackfillLoopAuditMode( contentHex = try: ( - await portalClient.portal_historyGetContent( + await bridge.portalClient.portal_historyGetContent( contentKey.encode.asSeq().toHex() ) ).content @@ -512,43 +484,59 @@ proc runBackfillLoopAuditMode( raiseAssert "Failed to build header with proof: " & error # gossip block header by hash - (await portalClient.gossipBlockHeader(blockHash, headerWithProof)).isOkOr: - error "Failed to gossip block header", error, blockHash + await bridge.gossipBlockHeader(blockHash, headerWithProof) # gossip block header by number - (await portalClient.gossipBlockHeader(blockNumber, headerWithProof)).isOkOr: - error "Failed to gossip block header", error, blockHash + await bridge.gossipBlockHeader(blockNumber, headerWithProof) if not bodySuccess: - ( - await portalClient.gossipBlockBody( - blockHash, PortalBlockBodyLegacy.fromBlockBody(body) - ) - ).isOkOr: - error "Failed to gossip block body", error, blockHash + await bridge.gossipBlockBody(blockHash, PortalBlockBodyLegacy.fromBlockBody(body)) if not receiptsSuccess: - ( - await portalClient.gossipReceipts( - blockHash, PortalReceipts.fromReceipts(receipts) - ) - ).isOkOr: - error "Failed to gossip receipts", error, blockHash + await bridge.gossipReceipts(blockHash, PortalReceipts.fromReceipts(receipts)) await sleepAsync(2.seconds) proc runHistory*(config: PortalBridgeConf) = - let - portalClient = newRpcClientConnect(config.portalRpcUrl) - web3Client = newRpcClientConnect(config.web3Url) + let bridge = PortalHistoryBridge( + portalClient: newRpcClientConnect(config.portalRpcUrl), + web3Client: newRpcClientConnect(config.web3Url), + gossipQueue: newAsyncQueue[(seq[byte], seq[byte])](config.gossipConcurrency), + ) + + proc gossipWorker(bridge: PortalHistoryBridge) {.async: (raises: []).} = + try: + while true: + let + (contentKey, contentValue) = await bridge.gossipQueue.popFirst() + contentKeyHex = contentKey.toHex() + contentValueHex = contentValue.toHex() + + try: + let peers = await bridge.portalClient.portal_historyGossip( + contentKeyHex, contentValueHex + ) + debug "Content gossiped", peers, contentKey = contentKeyHex + except CancelledError as e: + trace "Cancelled gossipWorker" + raise e + except CatchableError as e: + error "JSON-RPC portal_historyGossip failed", + error = $e.msg, contentKey = contentKeyHex + except CancelledError: + trace "gossipWorker canceled" + + var workers: seq[Future[void]] = @[] + for i in 0 ..< config.gossipConcurrency: + workers.add bridge.gossipWorker() if config.latest: - asyncSpawn runLatestLoop(portalClient, web3Client, config.blockVerify) + asyncSpawn bridge.runLatestLoop(config.blockVerify) if config.backfill: if config.audit: - asyncSpawn runBackfillLoopAuditMode( - portalClient, web3Client, config.era1Dir.string - ) + asyncSpawn bridge.runBackfillLoopAuditMode(config.era1Dir.string) else: - asyncSpawn runBackfillLoop(portalClient, web3Client, config.era1Dir.string) + asyncSpawn bridge.runBackfillLoop( + config.era1Dir.string, config.startEra, config.endEra + ) while true: poll() diff --git a/fluffy/tools/portal_bridge/portal_bridge_state.nim b/fluffy/tools/portal_bridge/portal_bridge_state.nim index eff98584b9..1b0c7cda17 100644 --- a/fluffy/tools/portal_bridge/portal_bridge_state.nim +++ b/fluffy/tools/portal_bridge/portal_bridge_state.nim @@ -263,6 +263,7 @@ proc runBackfillGossipBlockOffersLoop( portalRpcUrl: JsonRpcUrl, portalNodeId: NodeId, verifyGossip: bool, + skipGossipForExisting: bool, workerId: int, ) {.async: (raises: [CancelledError]).} = info "Starting state backfill gossip block offers loop", workerId @@ -304,18 +305,29 @@ proc runBackfillGossipBlockOffersLoop( var retryGossip = false for k, v in offersMap: - try: - let numPeers = await portalClient.portal_stateGossip(k.to0xHex(), v.to0xHex()) - if numPeers > 0: - debug "Offer successfully gossipped to peers: ", numPeers, workerId - elif numPeers == 0: - warn "Offer gossipped to no peers", workerId + var gossipContent = true + if skipGossipForExisting: + try: + let contentInfo = await portalClient.portal_stateGetContent(k.to0xHex()) + if contentInfo.content.len() > 0: + gossipContent = false + except CatchableError as e: + warn "Failed to find content with key: ", + contentKey = k.to0xHex(), error = e.msg, workerId + + if gossipContent: + try: + let numPeers = await portalClient.portal_stateGossip(k.to0xHex(), v.to0xHex()) + if numPeers > 0: + debug "Offer successfully gossipped to peers: ", numPeers, workerId + elif numPeers == 0: + warn "Offer gossipped to no peers", workerId + retryGossip = true + break + except CatchableError as e: + error "Failed to gossip offer to peers", error = e.msg, workerId retryGossip = true break - except CatchableError as e: - error "Failed to gossip offer to peers", error = e.msg, workerId - retryGossip = true - break if retryGossip: await sleepAsync(3.seconds) @@ -336,7 +348,7 @@ proc runBackfillGossipBlockOffersLoop( break except CatchableError as e: warn "Failed to find content with key: ", - contentKey = k, error = e.msg, workerId + contentKey = k.to0xHex(), error = e.msg, workerId retryGossip = true break @@ -349,6 +361,9 @@ proc runBackfillGossipBlockOffersLoop( if blockOffers.blockNumber mod 1000 == 0: info "Finished gossiping offers for block number: ", workerId, blockNumber = blockOffers.blockNumber, offerCount = offersMap.len() + else: + debug "Finished gossiping offers for block number: ", + workerId, blockNumber = blockOffers.blockNumber, offerCount = offersMap.len() blockOffers = await blockOffersQueue.popFirst() @@ -422,7 +437,8 @@ proc runState*(config: PortalBridgeConf) = for workerId in 1 .. config.gossipWorkersCount.int: asyncSpawn runBackfillGossipBlockOffersLoop( - blockOffersQueue, config.portalRpcUrl, portalNodeId, config.verifyGossip, workerId + blockOffersQueue, config.portalRpcUrl, portalNodeId, config.verifyGossip, + config.skipGossipForExisting, workerId, ) asyncSpawn runBackfillMetricsLoop(blockDataQueue, blockOffersQueue) diff --git a/fluffy/tools/portalcli.nim b/fluffy/tools/portalcli.nim index 690d7fec76..097944f21e 100644 --- a/fluffy/tools/portalcli.nim +++ b/fluffy/tools/portalcli.nim @@ -255,6 +255,7 @@ proc run(config: PortalCliConf) = testContentIdHandler, createGetHandler(db), createStoreHandler(db, defaultRadiusConfig), + createContainsHandler(db), createRadiusHandler(db), stream, bootstrapRecords = bootstrapRecords, diff --git a/hive_integration/nodocker/consensus/consensus_sim.nim b/hive_integration/nodocker/consensus/consensus_sim.nim index 2d0b1e25e2..aa63cc1651 100644 --- a/hive_integration/nodocker/consensus/consensus_sim.nim +++ b/hive_integration/nodocker/consensus/consensus_sim.nim @@ -35,7 +35,7 @@ proc processChainData(cd: ChainData): TestStatus = # bad blocks discard importRlpBlocks(bytes, c, finalize = true) - let head = com.db.getCanonicalHead() + let head = com.db.getCanonicalHead().expect("canonical head exists") let blockHash = "0x" & head.blockHash.data.toHex if blockHash == cd.lastBlockHash: TestStatus.OK diff --git a/hive_integration/nodocker/engine/cancun/blobs.nim b/hive_integration/nodocker/engine/cancun/blobs.nim index 9d885f963f..7f83069dd3 100644 --- a/hive_integration/nodocker/engine/cancun/blobs.nim +++ b/hive_integration/nodocker/engine/cancun/blobs.nim @@ -25,12 +25,12 @@ type BlobCommitment* = object blob*: kzg.KzgBlob - commitment*: kzg.KZGCommitment + commitment*: kzg.KzgCommitment BlobTxWrapData* = object hashes*: seq[Hash32] blobs*: seq[kzg.KzgBlob] - commitments*: seq[kzg.KZGCommitment] + commitments*: seq[kzg.KzgCommitment] proofs*: seq[kzg.KzgProof] func getBlobList*(startId: BlobID, count: int): BlobIDs = @@ -147,7 +147,7 @@ proc getVersionedHash*(blobid: BlobID, commitmentVersion: byte): Hash32 = proc blobDataGenerator*(startBlobId: BlobID, blobCount: int): BlobTxWrapData = result.blobs = newSeq[kzg.KzgBlob](blobCount) - result.commitments = newSeq[kzg.KZGCommitment](blobCount) + result.commitments = newSeq[kzg.KzgCommitment](blobCount) result.hashes = newSeq[Hash32](blobCount) result.proofs = newSeq[kzg.KzgProof](blobCount) diff --git a/hive_integration/nodocker/engine/cancun/customizer.nim b/hive_integration/nodocker/engine/cancun/customizer.nim index a3d846c2e6..3c71218549 100644 --- a/hive_integration/nodocker/engine/cancun/customizer.nim +++ b/hive_integration/nodocker/engine/cancun/customizer.nim @@ -277,7 +277,7 @@ method getVersionedHashes(cust: ExtraVersionedHash, for i, h in baseVersionedHashes: v[i] = h - var extraHash = common.Hash32.randomBytes() + var extraHash = Hash32.randomBytes() extraHash.data[0] = VERSIONED_HASH_VERSION_KZG v[^1] = extraHash Opt.some(v) @@ -539,11 +539,6 @@ func scramble(data: Hash32): Opt[Hash32] = h.data[^1] = byte(255 - h.data[^1]) Opt.some(h) -func scramble(data: Bytes32): Opt[Hash32] = - var h = Hash32 data - h.data[0] = byte(255 - h.data[0]) - Opt.some(h) - # This function generates an invalid payload by taking a base payload and modifying the specified field such that it ends up being invalid. # One small consideration is that the payload needs to contain transactions and specially transactions using the PREVRANDAO opcode for all the fields to be compatible with this function. proc generateInvalidPayload*(sender: TxSender, data: ExecutableData, payloadField: InvalidPayloadBlockField): ExecutableData = @@ -586,7 +581,7 @@ proc generateInvalidPayload*(sender: TxSender, data: ExecutableData, payloadFiel of InvalidPrevRandao: # This option potentially requires a transaction that uses the PREVRANDAO opcode. # Otherwise the payload will still be valid. - let randomHash = common.Hash32.randomBytes() + let randomHash = Hash32.randomBytes() customPayloadMod = CustomPayloadData( prevRandao: Opt.some(Bytes32 randomHash.data), ) diff --git a/hive_integration/nodocker/engine/cancun/helpers.nim b/hive_integration/nodocker/engine/cancun/helpers.nim index ec9444f889..8ef4578981 100644 --- a/hive_integration/nodocker/engine/cancun/helpers.nim +++ b/hive_integration/nodocker/engine/cancun/helpers.nim @@ -15,7 +15,7 @@ import eth/common/eth_types_rlp, chronicles, stew/byteutils, - kzg4844/kzg_ex as kzg, + kzg4844/kzg, ../types, ../engine_client, ../../../../nimbus/constants, @@ -57,21 +57,6 @@ proc addBlobTransaction*(pool: TestBlobTxPool, tx: PooledTransaction) = let txHash = rlpHash(tx) pool.transactions[txHash] = tx -proc `==`(a: openArray[AccessTuple], b: openArray[AccessPair]): bool = - if a.len != b.len: - return false - - for i in 0..= Version.V2: r.expectStatus(PayloadExecutionStatus.invalid) @@ -136,8 +136,7 @@ method execute(cs: BadHashOnNewPayload, env: TestEnv): bool = # or INVALID (client still has the payload and can verify that this payload is incorrectly building on top of it), # but a VALID response is incorrect. let - version = env.engine.version(shadow.payload.timestamp) - r = env.engine.client.newPayload(version, shadow.payload) + r = env.engine.newPayload(shadow.payload) r.expectStatusEither([PayloadExecutionStatus.accepted, PayloadExecutionStatus.invalid, PayloadExecutionStatus.syncing]) return true )) @@ -185,7 +184,7 @@ method execute(cs: ParentHashOnNewPayload, env: TestEnv): bool = # Starting from Shanghai, INVALID should be returned instead (https:#githucs.com/ethereum/execution-apis/pull/338) let version = env.engine.version(payload.timestamp) - r = env.engine.client.newPayload(version, payload) + r = env.engine.newPayload(payload) if version >= Version.V2: r.expectStatus(PayloadExecutionStatus.invalid) diff --git a/hive_integration/nodocker/engine/engine/forkchoice.nim b/hive_integration/nodocker/engine/engine/forkchoice.nim index 4a46ee8159..0635fea643 100644 --- a/hive_integration/nodocker/engine/engine/forkchoice.nim +++ b/hive_integration/nodocker/engine/engine/forkchoice.nim @@ -12,7 +12,8 @@ import std/strutils, chronicles, ./engine_spec, - ../cancun/customizer + ../cancun/customizer, + ../../../../nimbus/utils/utils type ForkchoiceStateField* = enum @@ -60,8 +61,7 @@ method execute(cs: InconsistentForkchoiceTest, env: TestEnv): bool = shadow.canon.add env.clMock.latestExecutableData # Send the alternative payload - let version = env.engine.version(altPayload.timestamp) - let r = env.engine.client.newPayload(version, altPayload) + let r = env.engine.newPayload(altPayload) r.expectStatusEither([PayloadExecutionStatus.valid, PayloadExecutionStatus.accepted]) return true )) @@ -77,18 +77,18 @@ method execute(cs: InconsistentForkchoiceTest, env: TestEnv): bool = case cs.field of HeadBlockHash: - inconsistentFcU.headblockHash = shadow.alt[len(shadow.alt)-1].blockHash + inconsistentFcU.headBlockHash = shadow.alt[len(shadow.alt)-1].blockHash of SafeBlockHash: - inconsistentFcU.safeblockHash = shadow.alt[len(shadow.canon)-2].blockHash + inconsistentFcU.safeBlockHash = shadow.alt[len(shadow.canon)-2].blockHash of FinalizedBlockHash: - inconsistentFcU.finalizedblockHash = shadow.alt[len(shadow.canon)-3].blockHash + inconsistentFcU.finalizedBlockHash = shadow.alt[len(shadow.canon)-3].blockHash - let version = env.engine.version(env.clMock.latestPayloadBuilt.timestamp) - var r = env.engine.client.forkchoiceUpdated(version, inconsistentFcU) + let timeVer = env.clMock.latestPayloadBuilt.timestamp + var r = env.engine.forkchoiceUpdated(timeVer, inconsistentFcU) r.expectErrorCode(engineApiInvalidForkchoiceState) # Return to the canonical chain - r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice) + r = env.engine.forkchoiceUpdated(timeVer, env.clMock.latestForkchoice) r.expectPayloadStatus(PayloadExecutionStatus.valid) return true @@ -118,56 +118,56 @@ method execute(cs: ForkchoiceUpdatedUnknownBlockHashTest, env: TestEnv): bool = if cs.field == HeadBlockHash: let fcu = ForkchoiceStateV1( - headblockHash: randomblockHash, - safeblockHash: env.clMock.latestForkchoice.safeblockHash, - finalizedblockHash: env.clMock.latestForkchoice.finalizedblockHash, + headBlockHash: randomblockHash, + safeBlockHash: env.clMock.latestForkchoice.safeBlockHash, + finalizedBlockHash: env.clMock.latestForkchoice.finalizedBlockHash, ) info "forkchoiceStateUnknownHeadHash", - head=fcu.headblockHash.short, - safe=fcu.safeblockHash.short, - final=fcu.finalizedblockHash.short + head=fcu.headBlockHash.short, + safe=fcu.safeBlockHash.short, + final=fcu.finalizedBlockHash.short # Execution specification:: # - (payloadStatus: (status: SYNCING, latestValidHash: null, validationError: null), payloadId: null) # if forkchoiceState.headblockHash references an unknown payload or a payload that can't be validated # because requisite data for the validation is missing - let version = env.engine.version(env.clMock.latestExecutedPayload.timestamp) - var r = env.engine.client.forkchoiceUpdated(version, fcu) + let timeVer = env.clMock.latestExecutedPayload.timestamp + var r = env.engine.forkchoiceUpdated(timeVer, fcu) r.expectPayloadStatus(PayloadExecutionStatus.syncing) var payloadAttributes = env.clMock.latestPayloadAttributes payloadAttributes.timestamp = w3Qty(payloadAttributes.timestamp, 1) # Test again using PayloadAttributes, should also return SYNCING and no PayloadID - r = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(payloadAttributes)) + r = env.engine.forkchoiceUpdated(timeVer, fcu, Opt.some(payloadAttributes)) r.expectPayloadStatus(PayloadExecutionStatus.syncing) - r.expectPayloadID(Opt.none(PayloadID)) + r.expectPayloadID(Opt.none(Bytes8)) else: let pbRes = env.clMock.produceSingleBlock(BlockProcessCallbacks( # Run test after a new payload has been broadcast onNewPayloadBroadcast: proc(): bool = var fcu = ForkchoiceStateV1( - headblockHash: env.clMock.latestExecutedPayload.blockHash, - safeblockHash: env.clMock.latestForkchoice.safeblockHash, - finalizedblockHash: env.clMock.latestForkchoice.finalizedblockHash, + headBlockHash: env.clMock.latestExecutedPayload.blockHash, + safeBlockHash: env.clMock.latestForkchoice.safeBlockHash, + finalizedBlockHash: env.clMock.latestForkchoice.finalizedBlockHash, ) if cs.field == SafeBlockHash: - fcu.safeblockHash = randomblockHash + fcu.safeBlockHash = randomblockHash elif cs.field == FinalizedBlockHash: - fcu.finalizedblockHash = randomblockHash + fcu.finalizedBlockHash = randomblockHash - let version = env.engine.version(env.clMock.latestExecutedPayload.timestamp) - var r = env.engine.client.forkchoiceUpdated(version, fcu) + let timeVer = env.clMock.latestExecutedPayload.timestamp + var r = env.engine.forkchoiceUpdated(timeVer, fcu) r.expectError() var payloadAttributes = env.clMock.latestPayloadAttributes - payloadAttributes.prevRandao = default(Hash32) + payloadAttributes.prevRandao = default(Bytes32) payloadAttributes.suggestedFeeRecipient = default(Address) # Test again using PayloadAttributes, should also return INVALID and no PayloadID - r = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(payloadAttributes)) + r = env.engine.forkchoiceUpdated(timeVer, fcu, Opt.some(payloadAttributes)) r.expectError() return true )) diff --git a/hive_integration/nodocker/engine/engine/invalid_ancestor.nim b/hive_integration/nodocker/engine/engine/invalid_ancestor.nim index aa7c9c0951..c41259c4e1 100644 --- a/hive_integration/nodocker/engine/engine/invalid_ancestor.nim +++ b/hive_integration/nodocker/engine/engine/invalid_ancestor.nim @@ -117,12 +117,12 @@ method execute(cs: InvalidMissingAncestorReOrgTest, env: TestEnv): bool = blockHash=shadow.payloads[i].blockHash.short, number=shadow.payloads[i].blockNumber.uint64 - let version = env.engine.version(shadow.payloads[i].timestamp) - let r = env.engine.client.newPayload(version, shadow.payloads[i]) + let r = env.engine.newPayload(shadow.payloads[i]) let fcState = ForkchoiceStateV1( headblockHash: shadow.payloads[i].blockHash, ) - let p = env.engine.client.forkchoiceUpdated(version, fcState) + let timeVer = shadow.payloads[i].timestamp + let p = env.engine.forkchoiceUpdated(timeVer, fcState) if i == cs.invalidIndex: # If this is the first payload after the common ancestor, and this is the payload we invalidated, @@ -147,8 +147,8 @@ method execute(cs: InvalidMissingAncestorReOrgTest, env: TestEnv): bool = p.expectLatestValidHash(shadow.payloads[i].blockHash) # Resend the latest correct fcU - let version = env.engine.version(env.clMock.latestPayloadBuilt.timestamp) - let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice) + let timeVer = env.clMock.latestPayloadBuilt.timestamp + let r = env.engine.forkchoiceUpdated(timeVer, env.clMock.latestForkchoice) r.expectNoError() # After this point, the CL Mock will send the next payload of the canonical chain return true @@ -192,10 +192,11 @@ method getName(cs: InvalidMissingAncestorReOrgSyncTest): string = "Invalid Missing Ancestor Syncing ReOrg, $1, EmptyTxs=$2, CanonicalReOrg=$3, Invalid P$4" % [ $cs.invalidField, $cs.emptyTransactions, $cs.reOrgFromCanonical, $cs.invalidIndex] -func blockHeader(ex: ExecutableData): common.BlockHeader = - blockHeader(ex.basePayload, ex.beaconRoot) +func blockHeader(ex: ExecutableData): Header = + let requestsHash = calcRequestsHash(ex.executionRequests) + blockHeader(ex.basePayload, ex.beaconRoot, requestsHash) -func blockBody(ex: ExecutableData): common.BlockBody = +func blockBody(ex: ExecutableData): BlockBody = blockBody(ex.basePayload) method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool = @@ -307,14 +308,14 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool = if i < cs.invalidIndex: let p = shadow.payloads[i] - let version = sec.version(p.timestamp) - let r = sec.client.newPayload(version, p) + let r = sec.newPayload(p) #r.ExpectationDescription = "Sent modified payload to secondary client, expected to be accepted" r.expectStatusEither([PayloadExecutionStatus.valid, PayloadExecutionStatus.accepted]) let fcu = ForkchoiceStateV1( headblockHash: p.blockHash, ) - let s = sec.client.forkchoiceUpdated(version, fcu) + let timeVer = sec.version(p.timestamp) + let s = sec.forkchoiceUpdated(timeVer, fcu) #s.ExpectationDescription = "Sent modified payload forkchoice updated to secondary client, expected to be accepted" s.expectStatusEither([PayloadExecutionStatus.valid, PayloadExecutionStatus.syncing]) @@ -361,14 +362,14 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool = # If we are syncing through p2p, we need to keep polling until the client syncs the missing payloads let period = chronos.milliseconds(500) while true: - let version = env.engine.version(shadow.payloads[shadow.n].timestamp) - let r = env.engine.client.newPayload(version, shadow.payloads[shadow.n]) + let r = env.engine.newPayload(shadow.payloads[shadow.n]) info "Response from main client", status=r.get.status let fcu = ForkchoiceStateV1( headblockHash: shadow.payloads[shadow.n].blockHash, ) - let s = env.engine.client.forkchoiceUpdated(version, fcu) + let timeVer = shadow.payloads[shadow.n].timestamp + let s = env.engine.forkchoiceUpdated(timeVer, fcu) info "Response from main client fcu", status=s.get.payloadStatus.status if r.get.status == PayloadExecutionStatus.invalid: @@ -412,12 +413,12 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool = for i in start..stop: if env.clMock.executedPayloadHistory.hasKey(i): let payload = env.clMock.executedPayloadHistory[i] - let r = env.engine.client.newPayload(payload) + let r = env.engine.newPayload(payload) r.expectStatus(PayloadExecutionStatus.valid) # Resend the latest correct fcU - let version = env.engine.version(env.clMock.latestPayloadBuilt.timestamp) - let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice) + let timeVer = env.clMock.latestPayloadBuilt.timestamp + let r = env.engine.forkchoiceUpdated(timeVer, env.clMock.latestForkchoice) r.expectNoError() # After this point, the CL Mock will send the next payload of the canonical chain return true diff --git a/hive_integration/nodocker/engine/engine/invalid_payload.nim b/hive_integration/nodocker/engine/engine/invalid_payload.nim index 456ad3c637..ffa8fab8ca 100644 --- a/hive_integration/nodocker/engine/engine/invalid_payload.nim +++ b/hive_integration/nodocker/engine/engine/invalid_payload.nim @@ -98,8 +98,8 @@ method execute(cs: InvalidPayloadTestCase, env: TestEnv): bool = )) ## This block is now unknown to the main client, sending an fcU will set it to cs.syncing mode - let version = env.engine.version(env.clMock.latestPayloadBuilt.timestamp) - let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice) + let timeVer = env.clMock.latestPayloadBuilt.timestamp + let r = env.engine.forkchoiceUpdated(timeVer, env.clMock.latestForkchoice) r.expectPayloadStatus(PayloadExecutionStatus.syncing) let shadow = InvalidPayloadShadow( @@ -132,8 +132,7 @@ method execute(cs: InvalidPayloadTestCase, env: TestEnv): bool = shadow.nilLatestValidHash = true # Depending on the field we modified, we expect a different status - var version = env.engine.version(shadow.alteredPayload.timestamp) - let r = env.engine.client.newPayload(version, shadow.alteredPayload) + let r = env.engine.newPayload(shadow.alteredPayload) if cs.syncing or cs.invalidField == InvalidParentHash: # Execution specification:: # (status: ACCEPTED, latestValidHash: null, validationError: null) if the following conditions are met: @@ -166,14 +165,14 @@ method execute(cs: InvalidPayloadTestCase, env: TestEnv): bool = var attr = env.clMock.latestPayloadAttributes attr.timestamp = w3Qty(shadow.alteredPayload.timestamp, 1) - attr.prevRandao = default(Hash32) + attr.prevRandao = default(Bytes32) attr.suggestedFeeRecipient = default(Address) # Execution specification: # (payloadStatus: (status: INVALID, latestValidHash: null, validationError: errorMessage | null), payloadId: null) # obtained from the Payload validation process if the payload is deemed INVALID - version = env.engine.version(shadow.alteredPayload.timestamp) - let s = env.engine.client.forkchoiceUpdated(version, fcState, Opt.some(attr)) + var timeVer = shadow.alteredPayload.timestamp + let s = env.engine.forkchoiceUpdated(timeVer, fcState, Opt.some(attr)) if not cs.syncing: # Execution specification: # (payloadStatus: (status: INVALID, latestValidHash: null, validationError: errorMessage | null), payloadId: null) @@ -185,22 +184,21 @@ method execute(cs: InvalidPayloadTestCase, env: TestEnv): bool = s.expectPayloadStatus(PayloadExecutionStatus.syncing) # When we send the previous payload, the client must now be capable of determining that the invalid payload is actually invalid - let version = env.engine.version(env.clMock.latestExecutedPayload.timestamp) - let p = env.engine.client.newPayload(version, env.clMock.latestExecutedPayload) + let p = env.engine.newPayload(env.clMock.latestExecutedPayload) p.expectStatus(PayloadExecutionStatus.valid) p.expectLatestValidHash(env.clMock.latestExecutedPayload.blockHash) # Another option here could be to send an fcU to the previous payload, # but this does not seem like something the CL would do. - #s = env.engine.client.forkchoiceUpdated(ForkchoiceStateV1( + #s = env.engine.forkchoiceUpdated(ForkchoiceStateV1( # headblockHash: previousPayload.blockHash, # safeblockHash: previousPayload.blockHash, # finalizedblockHash: previousPayload.blockHash, #), nil) #s.expectPayloadStatus(Valid) - let q = env.engine.client.newPayload(version, shadow.alteredPayload) + let q = env.engine.newPayload(shadow.alteredPayload) if cs.invalidField == InvalidParentHash: # There is no invalid parentHash, if this value is incorrect, # it is assumed that the block is missing and we need to sync. @@ -224,8 +222,8 @@ method execute(cs: InvalidPayloadTestCase, env: TestEnv): bool = # Try sending the fcU again, this time we should get the proper invalid response. # At this moment the response should be INVALID if cs.invalidField != InvalidParentHash: - let version = env.engine.version(shadow.alteredPayload.timestamp) - let s = env.engine.client.forkchoiceUpdated(version, fcState) + let timeVer = shadow.alteredPayload.timestamp + let s = env.engine.forkchoiceUpdated(timeVer, fcState) # Note: syncing is acceptable here as long as the block produced after this test is produced successfully s.expectStatusEither([PayloadExecutionStatus.syncing, PayloadExecutionStatus.invalid]) @@ -238,12 +236,12 @@ method execute(cs: InvalidPayloadTestCase, env: TestEnv): bool = if cs.syncing: # Send the valid payload and its corresponding forkchoiceUpdated - let version = env.engine.version(env.clMock.latestExecutedPayload.timestamp) - let r = env.engine.client.newPayload(version, env.clMock.latestExecutedPayload) + let r = env.engine.newPayload(env.clMock.latestExecutedPayload) r.expectStatus(PayloadExecutionStatus.valid) r.expectLatestValidHash(env.clMock.latestExecutedPayload.blockHash) - let s = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice) + let timeVer = env.clMock.latestExecutedPayload.timestamp + let s = env.engine.forkchoiceUpdated(timeVer, env.clMock.latestForkchoice) s.expectPayloadStatus(PayloadExecutionStatus.valid) s.expectLatestValidHash(env.clMock.latestExecutedPayload.blockHash) @@ -273,8 +271,7 @@ method execute(cs: InvalidPayloadTestCase, env: TestEnv): bool = # or syncing (parent payload is thrown out and also client assumes that the parent is part of canonical chain) # or INVALID (client still has the payload and can verify that this payload is incorrectly building on top of it), # but a VALID response is incorrect. - let version = env.engine.version(followUpAlteredPayload.timestamp) - let r = env.engine.client.newPayload(version, followUpAlteredPayload) + let r = env.engine.newPayload(followUpAlteredPayload) r.expectStatusEither([PayloadExecutionStatus.accepted, PayloadExecutionStatus.invalid, PayloadExecutionStatus.syncing]) if r.get.status in [PayloadExecutionStatus.accepted, PayloadExecutionStatus.syncing]: r.expectLatestValidHash() @@ -332,12 +329,12 @@ method execute(cs: PayloadBuildAfterInvalidPayloadTest, env: TestEnv): bool = # Get a payload from the invalid payload producer and invalidate it let customizer = BasePayloadAttributesCustomizer( - prevRandao: Opt.some(default(Hash32)), + prevRandao: Opt.some(default(Bytes32)), suggestedFeerecipient: Opt.some(ZeroAddr), ) payloadAttributes = customizer.getPayloadAttributes(env.clMock.latestPayloadAttributes) - version = env.engine.version(env.clMock.latestHeader.timestamp) - r = invalidPayloadProducer.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, Opt.some(payloadAttributes)) + timeVer = env.clMock.latestHeader.timestamp + r = invalidPayloadProducer.forkchoiceUpdated(timeVer, env.clMock.latestForkchoice, payloadAttributes) r.expectPayloadStatus(PayloadExecutionStatus.valid) # Wait for the payload to be produced by the EL @@ -346,7 +343,7 @@ method execute(cs: PayloadBuildAfterInvalidPayloadTest, env: TestEnv): bool = let versione = env.engine.version(payloadAttributes.timestamp) - s = invalidPayloadProducer.client.getPayload(r.get.payloadID.get, versione) + s = invalidPayloadProducer.client.getPayload(versione, r.get.payloadId.get) s.expectNoError() let basePayload = s.get.executionPayload @@ -359,15 +356,14 @@ method execute(cs: PayloadBuildAfterInvalidPayloadTest, env: TestEnv): bool = # Broadcast the invalid payload let - version = env.engine.version(inv_p.timestamp) - r = env.engine.client.newPayload(version, inv_p) + r = env.engine.newPayload(inv_p) r.expectStatus(PayloadExecutionStatus.invalid) - r.expectLatestValidHash(env.clMock.latestForkchoice.headblockHash) + r.expectLatestValidHash(env.clMock.latestForkchoice.headBlockHash) - let s = sec.client.newPayload(version, inv_p) + let s = sec.newPayload(inv_p) s.expectStatus(PayloadExecutionStatus.invalid) - s.expectLatestValidHash(env.clMock.latestForkchoice.headblockHash) + s.expectLatestValidHash(env.clMock.latestForkchoice.headBlockHash) # Let the block production continue. # At this point the selected payload producer will diff --git a/hive_integration/nodocker/engine/engine/payload_attributes.nim b/hive_integration/nodocker/engine/engine/payload_attributes.nim index 1344205361..14dce4b0b0 100644 --- a/hive_integration/nodocker/engine/engine/payload_attributes.nim +++ b/hive_integration/nodocker/engine/engine/payload_attributes.nim @@ -46,9 +46,9 @@ method execute(cs: InvalidPayloadAttributesTest, env: TestEnv): bool = var fcu = env.clMock.latestForkchoice if cs.syncing: # Setting a random hash will put the client into `SYNCING` - fcu.headblockHash = Hash32.randomBytes() + fcu.headBlockHash = Hash32.randomBytes() else: - fcu.headblockHash = env.clMock.latestPayloadBuilt.blockHash + fcu.headBlockHash = env.clMock.latestPayloadBuilt.blockHash info "Sending EngineForkchoiceUpdated with invalid payload attributes", syncing=cs.syncing, description=cs.description @@ -63,20 +63,20 @@ method execute(cs: InvalidPayloadAttributesTest, env: TestEnv): bool = # 2) Apply forkchoiceState # 3) Check payloadAttributes, if invalid respond with error: code: Invalid payload attributes # 4) Start payload build process and respond with VALID - let version = env.engine.version(env.clMock.latestPayloadBuilt.timestamp) + let timeVer = env.clMock.latestPayloadBuilt.timestamp if cs.syncing: # If we are SYNCING, the outcome should be SYNCING regardless of the validity of the payload atttributes - let r = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(attr)) + let r = env.engine.forkchoiceUpdated(timeVer, fcu, attr) r.expectPayloadStatus(PayloadExecutionStatus.syncing) - r.expectPayloadID(Opt.none(PayloadID)) + r.expectPayloadID(Opt.none(Bytes8)) else: - let r = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(attr)) + let r = env.engine.forkchoiceUpdated(timeVer, fcu, attr) r.expectErrorCode(engineApiInvalidPayloadAttributes) # Check that the forkchoice was applied, regardless of the error let s = env.engine.client.latestHeader() #s.ExpectationDescription = "Forkchoice is applied even on invalid payload attributes" - s.expectHash(fcu.headblockHash) + s.expectHash(fcu.headBlockHash) return true )) diff --git a/hive_integration/nodocker/engine/engine/payload_execution.nim b/hive_integration/nodocker/engine/engine/payload_execution.nim index a5b4a88773..e8e39985e3 100644 --- a/hive_integration/nodocker/engine/engine/payload_execution.nim +++ b/hive_integration/nodocker/engine/engine/payload_execution.nim @@ -69,7 +69,7 @@ method execute(cs: ReExecutePayloadTest, env: TestEnv): bool = for i in start..lastBlock: doAssert env.clMock.executedPayloadHistory.hasKey(i) let payload = env.clMock.executedPayloadHistory[i] - let r = env.engine.client.newPayload(payload) + let r = env.engine.newPayload(payload) r.expectStatus(PayloadExecutionStatus.valid) r.expectLatestValidHash(payload.blockHash) @@ -152,8 +152,8 @@ method execute(cs: InOrderPayloadExecutionTest, env: TestEnv): bool = finalizedblockHash: env.clMock.latestExecutedPayload.blockHash, ) - var version = sec.version(env.clMock.latestExecutedPayload.timestamp) - var s = sec.client.forkchoiceUpdated(version, fcU) + var timeVer = env.clMock.latestExecutedPayload.timestamp + var s = sec.forkchoiceUpdated(timeVer, fcU) s.expectPayloadStatus(PayloadExecutionStatus.syncing) s.expectLatestValidHash() s.expectNoValidationError() @@ -162,14 +162,14 @@ method execute(cs: InOrderPayloadExecutionTest, env: TestEnv): bool = let start = env.clMock.firstPoSBlockNumber.get for k in start..env.clMock.latestExecutedPayload.blockNumber.uint64: let payload = env.clMock.executedPayloadHistory[k] - let s = sec.client.newPayload(payload) + let s = sec.newPayload(payload) s.expectStatus(PayloadExecutionStatus.valid) s.expectLatestValidHash(payload.blockHash) - version = sec.version(env.clMock.latestExecutedPayload.timestamp) - s = sec.client.forkchoiceUpdated(version, fcU) + timeVer = env.clMock.latestExecutedPayload.timestamp + s = sec.forkchoiceUpdated(timeVer, fcU) s.expectPayloadStatus(PayloadExecutionStatus.valid) - s.expectLatestValidHash(fcU.headblockHash) + s.expectLatestValidHash(fcU.headBlockHash) s.expectNoValidationError() # At this point we should have our funded account balance equal to the expected value. @@ -243,13 +243,12 @@ method execute(cs: MultiplePayloadsExtendingCanonicalChainTest, env: TestEnv): b # Fabricate and send multiple new payloads by changing the PrevRandao field for i in 0.. 0: + let alternativePayload = g.get.toExecutableData(attr) + testCond len(alternativePayload.basePayload.transactions) > 0: fatal "alternative payload does not contain the prevRandao opcode tx" - let s = env.engine.client.newPayload(alternativePayload) + let s = env.engine.newPayload(alternativePayload) s.expectStatus(PayloadExecutionStatus.valid) s.expectLatestValidHash(alternativePayload.blockHash) @@ -88,8 +87,8 @@ method execute(cs: SidechainReOrgTest, env: TestEnv): bool = safeBlockHash: env.clMock.latestForkchoice.safeBlockHash, finalizedBlockHash: env.clMock.latestForkchoice.finalizedBlockHash, ) - version = env.engine.version(alternativePayload.timestamp) - let p = env.engine.client.forkchoiceUpdated(version, fcu) + timeVer = alternativePayload.timestamp + let p = env.engine.forkchoiceUpdated(timeVer, fcu) p.expectPayloadStatus(PayloadExecutionStatus.valid) # PrevRandao should be the alternative prevRandao we sent @@ -105,7 +104,7 @@ method execute(cs: SidechainReOrgTest, env: TestEnv): bool = # Test performing a re-org that involves removing or modifying a transaction type - TransactionReOrgScenario = enum + TransactionReOrgScenario* = enum TransactionNoScenario TransactionReOrgScenarioReOrgOut = "Re-Org Out" TransactionReOrgScenarioReOrgBackIn = "Re-Org Back In" @@ -118,7 +117,7 @@ type scenario*: TransactionReOrgScenario ShadowTx = ref object - payload: ExecutionPayload + payload: ExecutableData nextTx: PooledTransaction tx: Opt[PooledTransaction] sendTransaction: proc(i: int): PooledTransaction {.gcsafe.} @@ -154,7 +153,7 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool = # Send a transaction on each payload of the canonical chain shadow.sendTransaction = proc(i: int): PooledTransaction {.gcsafe.} = - let sstoreContractAddr = hexToByteArray[20]("0000000000000000000000000000000000000317") + let sstoreContractAddr = address"0000000000000000000000000000000000000317" var data: array[32, byte] data[^1] = i.byte info "transactionReorg", idx=i @@ -182,20 +181,19 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool = if cs.scenario == TransactionReOrgScenarioReOrgOut: # Any payload we get should not contain any var attr = env.clMock.latestPayloadAttributes - attr.prevRandao = Hash32.randomBytes() + attr.prevRandao = Bytes32.randomBytes() - var version = env.engine.version(env.clMock.latestHeader.timestamp) - let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, Opt.some(attr)) + var timeVer = env.clMock.latestHeader.timestamp + let r = env.engine.forkchoiceUpdated(timeVer, env.clMock.latestForkchoice, attr) r.expectNoError() - testCond r.get.payloadID.isSome: + testCond r.get.payloadId.isSome: fatal "No payload ID returned by forkchoiceUpdated" - version = env.engine.version(attr.timestamp) - let g = env.engine.client.getPayload(r.get.payloadID.get, version) + let g = env.engine.getPayload(attr.timestamp, r.get.payloadId.get) g.expectNoError() - shadow.payload = g.get.executionPayload + shadow.payload = g.get.toExecutableData(attr) - testCond len(shadow.payload.transactions) == 0: + testCond len(shadow.payload.basePayload.transactions) == 0: fatal "Empty payload contains transactions" if cs.scenario != TransactionReOrgScenarioReOrgBackIn: @@ -221,7 +219,7 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool = let customizer = CustomPayloadData( extraData: Opt.some(@[0x01.byte]) ) - shadow.payload = customizer.customizePayload(env.clMock.latestExecutableData).basePayload + shadow.payload = customizer.customizePayload(env.clMock.latestExecutableData) testCond shadow.payload.parentHash == env.clMock.latestPayloadBuilt.parentHash: fatal "Incorrect parent hash for payloads" @@ -243,16 +241,16 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool = var payloadAttributes = env.clMock.latestPayloadAttributes payloadAttributes.suggestedFeeRecipient = Address.randomBytes() - var version = env.engine.version(env.clMock.latestHeader.timestamp) - let f = env.engine.client.forkchoiceUpdated(version, forkchoiceUpdated, Opt.some(payloadAttributes)) + var timeVer = Quantity env.clMock.latestHeader.timestamp + let f = env.engine.forkchoiceUpdated(timeVer, forkchoiceUpdated, payloadAttributes) f.expectPayloadStatus(PayloadExecutionStatus.valid) # Wait a second for the client to prepare the payload with the included transaction let period = chronos.seconds(env.clMock.payloadProductionClientDelay) waitFor sleepAsync(period) - version = env.engine.version(env.clMock.latestPayloadAttributes.timestamp) - let g = env.engine.client.getPayload(f.get.payloadID.get, version) + timeVer = env.clMock.latestPayloadAttributes.timestamp + let g = env.engine.getPayload(timeVer, f.get.payloadId.get) g.expectNoError() let payload = g.get.executionPayload @@ -260,13 +258,13 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool = fatal "Payload built does not contain the transaction" # Send the new payload and forkchoiceUpdated to it - let n = env.engine.client.newPayload(payload) + let n = env.engine.newPayload(g.get.toExecutableData(payloadAttributes)) n.expectStatus(PayloadExecutionStatus.valid) forkchoiceUpdated.headBlockHash = payload.blockHash - version = env.engine.version(payload.timestamp) - let s = env.engine.client.forkchoiceUpdated(version, forkchoiceUpdated) + timeVer = payload.timestamp + let s = env.engine.forkchoiceUpdated(timeVer, forkchoiceUpdated) s.expectPayloadStatus(PayloadExecutionStatus.valid) return true , @@ -294,7 +292,7 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool = #if shadow.payload == nil ( # fatal "No payload to re-org to", t.TestName) - let r = env.engine.client.newPayload(shadow.payload) + let r = env.engine.newPayload(shadow.payload) r.expectStatus(PayloadExecutionStatus.valid) r.expectLatestValidHash(shadow.payload.blockHash) @@ -304,8 +302,8 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool = finalizedBlockHash: env.clMock.latestForkchoice.finalizedBlockHash, ) - var version = env.engine.version(shadow.payload.timestamp) - let s = env.engine.client.forkchoiceUpdated(version, fcu) + var timeVer = shadow.payload.timestamp + let s = env.engine.forkchoiceUpdated(timeVer, fcu) s.expectPayloadStatus(PayloadExecutionStatus.valid) let p = env.engine.client.namedHeader(Head) @@ -320,11 +318,11 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool = # Re-org back if cs.scenario == TransactionReOrgScenarioNewPayloadOnRevert: - let r = env.engine.client.newPayload(env.clMock.latestPayloadBuilt) + let r = env.engine.newPayload(env.clMock.latestExecutedPayload) r.expectStatus(PayloadExecutionStatus.valid) r.expectLatestValidHash(env.clMock.latestPayloadBuilt.blockHash) - testCond env.clMock.broadcastForkchoiceUpdated(Version.V1, env.clMock.latestForkchoice) + testCond env.clMock.broadcastLatestForkchoice() if shadow.tx.isSome: # Now it should be back with main payload @@ -375,7 +373,7 @@ type ShadowCanon = ref object previousHash: Hash32 previousTimestamp: Web3Quantity - payload: ExecutionPayload + payload: ExecutableData parentForkchoice: ForkchoiceStateV1 parentTimestamp: uint64 @@ -423,19 +421,18 @@ method execute(cs: ReOrgBackToCanonicalTest, env: TestEnv): bool = var pbRes = env.clMock.produceSingleBlock(BlockProcessCallbacks( onPayloadAttributesGenerated: proc(): bool = var attr = env.clMock.latestPayloadAttributes - attr.prevRandao = Hash32.randomBytes() + attr.prevRandao = Bytes32.randomBytes() - var version = env.engine.version(env.clMock.latestHeader.timestamp) - let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, Opt.some(attr)) + var timeVer = Quantity env.clMock.latestHeader.timestamp + let r = env.engine.forkchoiceUpdated(timeVer, env.clMock.latestForkchoice, attr) r.expectNoError() - testCond r.get.payloadID.isSome: + testCond r.get.payloadId.isSome: fatal "No payload ID returned by forkchoiceUpdated" - version = env.engine.version(attr.timestamp) - let g = env.engine.client.getPayload(r.get.payloadID.get, version) + let g = env.engine.getPayload(attr.timestamp, r.get.payloadId.get) g.expectNoError() - shadow.payload = g.get.executionPayload + shadow.payload = g.get.toExecutableData(attr) shadow.parentForkchoice = env.clMock.latestForkchoice shadow.parentTimestamp = env.clMock.latestHeader.timestamp.uint64 return true @@ -465,13 +462,13 @@ method execute(cs: ReOrgBackToCanonicalTest, env: TestEnv): bool = onGetpayload: proc(): bool = # We are about to execute the new payload of the canonical chain, re-org back to # the side payload - var version = env.engine.version(shadow.parentTimestamp) - let f = env.engine.client.forkchoiceUpdated(version, shadow.parentForkchoice) + var timeVer = shadow.parentTimestamp + let f = env.engine.forkchoiceUpdated(timeVer, shadow.parentForkchoice) f.expectPayloadStatus(PayloadExecutionStatus.valid) f.expectLatestValidHash(shadow.parentForkchoice.headBlockHash) # Execute the side payload - let n = env.engine.client.newPayload(shadow.payload) + let n = env.engine.newPayload(shadow.payload) n.expectStatus(PayloadExecutionStatus.valid) n.expectLatestValidHash(shadow.payload.blockHash) # At this point the next canonical payload will be executed by the CL mock, so we can @@ -490,13 +487,13 @@ method execute(cs: ReOrgBackToCanonicalTest, env: TestEnv): bool = ) # It is only expected that the client does not produce an error and the CL Mocker is able to progress after the re-org - var version = env.engine.version(shadow.previousTimestamp) - var r = env.engine.client.forkchoiceUpdated(version, fcu) + var timeVer = shadow.previousTimestamp + var r = env.engine.forkchoiceUpdated(timeVer, fcu) r.expectNoError() # Re-send the ForkchoiceUpdated that the CLMock had sent - version = env.engine.version(env.clMock.latestExecutedPayload.timestamp) - r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice) + timeVer = env.clMock.latestExecutedPayload.timestamp + r = env.engine.forkchoiceUpdated(timeVer, env.clMock.latestForkchoice) r.expectNoError() return true )) @@ -569,8 +566,7 @@ method execute(cs: ReOrgBackFromSyncingTest, env: TestEnv): bool = onGetpayload: proc(): bool = # Re-org to the unavailable sidechain in the middle of block production # to be able to re-org back to the canonical chain - var version = env.engine.version(shadow.payloads[^1].timestamp) - let r = env.engine.client.newPayload(version, shadow.payloads[^1]) + let r = env.engine.newPayload(shadow.payloads[^1]) r.expectStatusEither([PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted]) r.expectLatestValidHash() @@ -582,8 +578,8 @@ method execute(cs: ReOrgBackFromSyncingTest, env: TestEnv): bool = ) # It is only expected that the client does not produce an error and the CL Mocker is able to progress after the re-org - version = env.engine.version(shadow.payloads[^1].timestamp) - let s = env.engine.client.forkchoiceUpdated(version, fcu) + let timeVer = shadow.payloads[^1].timestamp + let s = env.engine.forkchoiceUpdated(timeVer, fcu) s.expectLatestValidHash() s.expectPayloadStatus(PayloadExecutionStatus.syncing) @@ -610,8 +606,8 @@ func toSeq(x: string): seq[byte] = result.add z.byte func ethAddress(a, b: int): Address = - result[0] = a.byte - result[1] = b.byte + result.data[0] = a.byte + result.data[1] = b.byte # Test that performs a re-org to a previously validated payload on a side chain. method execute(cs: ReOrgPrevValidatedPayloadOnSideChainTest, env: TestEnv): bool = @@ -655,8 +651,7 @@ method execute(cs: ReOrgPrevValidatedPayloadOnSideChainTest, env: TestEnv): bool let payload = customData.customizePayload(env.clMock.latestExecutableData) shadow.payloads.add payload - let version = env.engine.version(payload.timestamp) - let r = env.engine.client.newPayload(version, payload) + let r = env.engine.newPayload(payload) r.expectStatus(PayloadExecutionStatus.valid) r.expectLatestValidHash(payload.blockHash) return true @@ -669,7 +664,7 @@ method execute(cs: ReOrgPrevValidatedPayloadOnSideChainTest, env: TestEnv): bool pbRes = env.clMock.produceSingleBlock(BlockProcessCallbacks( onGetpayload: proc(): bool = var - prevRandao = Hash32.randomBytes() + prevRandao = Bytes32.randomBytes() suggestedFeeRecipient = ethAddress(0x12, 0x34) let payloadAttributesCustomizer = BasePayloadAttributesCustomizer( @@ -686,17 +681,17 @@ method execute(cs: ReOrgPrevValidatedPayloadOnSideChainTest, env: TestEnv): bool finalizedBlockHash: env.clMock.latestForkchoice.finalizedBlockHash, ) - var version = env.engine.version(reOrgPayload.timestamp) - let r = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(newPayloadAttributes)) + var timeVer = reOrgPayload.timestamp + let r = env.engine.forkchoiceUpdated(timeVer, fcu, newPayloadAttributes) r.expectPayloadStatus(PayloadExecutionStatus.valid) r.expectLatestValidHash(reOrgPayload.blockHash) - version = env.engine.version(newPayloadAttributes.timestamp) - let p = env.engine.client.getPayload(r.get.payloadID.get, version) + timeVer = newPayloadAttributes.timestamp + let p = env.engine.getPayload(timeVer, r.get.payloadId.get) p.expectPayloadParentHash(reOrgPayload.blockHash) - let payload = p.get.executionPayload - let s = env.engine.client.newPayload(payload) + let payload = p.get.toExecutableData(newPayloadAttributes) + let s = env.engine.newPayload(payload) s.expectStatus(PayloadExecutionStatus.valid) s.expectLatestValidHash(payload.blockHash) @@ -768,8 +763,7 @@ method execute(cs: SafeReOrgToSideChainTest, env: TestEnv): bool = let pbRes = env.clMock.produceSingleBlock(BlockProcessCallbacks( onGetpayload: proc(): bool = for p in shadow.payloads: - let version = env.engine.version(p.timestamp) - let r = env.engine.client.newPayload(version, p) + let r = env.engine.newPayload(p) r.expectStatusEither([PayloadExecutionStatus.valid, PayloadExecutionStatus.accepted]) let fcu = ForkchoiceStateV1( @@ -778,8 +772,8 @@ method execute(cs: SafeReOrgToSideChainTest, env: TestEnv): bool = finalizedBlockHash: env.clMock.executedPayloadHistory[1].blockHash, ) - let version = env.engine.version(shadow.payloads[1].timestamp) - let r = env.engine.client.forkchoiceUpdated(version, fcu) + let timeVer = shadow.payloads[1].timestamp + let r = env.engine.forkchoiceUpdated(timeVer, fcu) r.expectPayloadStatus(PayloadExecutionStatus.valid) let head = env.engine.client.namedHeader(Head) diff --git a/hive_integration/nodocker/engine/engine/rpc.nim b/hive_integration/nodocker/engine/engine/rpc.nim index 93e9012d67..446d2cfb93 100644 --- a/hive_integration/nodocker/engine/engine/rpc.nim +++ b/hive_integration/nodocker/engine/engine/rpc.nim @@ -75,13 +75,13 @@ method execute(cs: BlockStatus, env: TestEnv): bool = of LatestOnNewPayload: callbacks.onGetPayload = proc(): bool = let r = env.engine.client.namedHeader(Head) - r.expectHash(env.clMock.latestForkchoice.headblockHash) + r.expectHash(env.clMock.latestForkchoice.headBlockHash) let s = env.engine.client.blockNumber() s.expectNumber(env.clMock.latestHeadNumber.uint64) let p = env.engine.client.namedHeader(Head) - p.expectHash(env.clMock.latestForkchoice.headblockHash) + p.expectHash(env.clMock.latestForkchoice.headBlockHash) # Check that the receipt for the transaction we just sent is still not available let q = env.engine.client.txReceipt(shadow.txHash) @@ -90,19 +90,19 @@ method execute(cs: BlockStatus, env: TestEnv): bool = of LatestOnHeadBlockHash: callbacks.onForkchoiceBroadcast = proc(): bool = let r = env.engine.client.namedHeader(Head) - r.expectHash(env.clMock.latestForkchoice.headblockHash) + r.expectHash(env.clMock.latestForkchoice.headBlockHash) let s = env.engine.client.txReceipt(shadow.txHash) s.expectTransactionHash(shadow.txHash) return true of SafeOnSafeBlockHash: callbacks.onSafeBlockChange = proc(): bool = let r = env.engine.client.namedHeader(Safe) - r.expectHash(env.clMock.latestForkchoice.safeblockHash) + r.expectHash(env.clMock.latestForkchoice.safeBlockHash) return true of FinalizedOnFinalizedBlockHash: callbacks.onFinalizedBlockChange = proc(): bool = let r = env.engine.client.namedHeader(Finalized) - r.expectHash(env.clMock.latestForkchoice.finalizedblockHash) + r.expectHash(env.clMock.latestForkchoice.finalizedBlockHash) return true # Perform the test diff --git a/hive_integration/nodocker/engine/engine/suggested_fee_recipient.nim b/hive_integration/nodocker/engine/engine/suggested_fee_recipient.nim index 515ac8ac02..853876c65e 100644 --- a/hive_integration/nodocker/engine/engine/suggested_fee_recipient.nim +++ b/hive_integration/nodocker/engine/engine/suggested_fee_recipient.nim @@ -9,8 +9,8 @@ # according to those terms. import - std/strutils, chronicles, + eth/common/eth_types_rlp, ./engine_spec, ../../../../nimbus/transaction @@ -62,8 +62,8 @@ method execute(cs: SuggestedFeeRecipientTest, env: TestEnv): bool = let blockIncluded = r.get - testCond blockIncluded.txs.len == cs.transactionCount: - error "expect transactions", get=blockIncluded.txs.len, expect=cs.transactionCount + testCond blockIncluded.transactions.len == cs.transactionCount: + error "expect transactions", get=blockIncluded.transactions.len, expect=cs.transactionCount testCond feeRecipient == blockIncluded.header.coinbase: error "expect coinbase", @@ -71,7 +71,7 @@ method execute(cs: SuggestedFeeRecipientTest, env: TestEnv): bool = expect=feeRecipient var feeRecipientFees = 0.u256 - for tx in blockIncluded.txs: + for tx in blockIncluded.transactions: let effGasTip = tx.effectiveGasTip(blockIncluded.header.baseFeePerGas) let r = env.engine.client.txReceipt(tx.rlpHash) diff --git a/hive_integration/nodocker/engine/engine/versioning.nim b/hive_integration/nodocker/engine/engine/versioning.nim index 65036fbc16..0450ffc06b 100644 --- a/hive_integration/nodocker/engine/engine/versioning.nim +++ b/hive_integration/nodocker/engine/engine/versioning.nim @@ -59,7 +59,7 @@ method execute(cs: ForkchoiceUpdatedOnPayloadRequestTest, env: TestEnv): bool = cs.forkchoiceUpdatedCustomizer.setEngineAPIVersionResolver(env.engine.com) let version = cs.forkchoiceUpdatedCustomizer.forkchoiceUpdatedVersion(env.clMock.latestHeader.timestamp.uint64) - let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, Opt.some(attr)) + let r = env.engine.forkchoiceUpdated(version, env.clMock.latestForkchoice, attr) #r.ExpectationDescription = cs.Expectation if expectedError != 0: r.expectErrorCode(expectedError) diff --git a/hive_integration/nodocker/engine/engine_client.nim b/hive_integration/nodocker/engine/engine_client.nim index 5dd3d2531b..4e14be591d 100644 --- a/hive_integration/nodocker/engine/engine_client.nim +++ b/hive_integration/nodocker/engine/engine_client.nim @@ -8,6 +8,8 @@ # at your option. This file may not be copied, modified, or distributed except # according to those terms. +{.push raises:[].} + import std/[times, json, strutils], stew/byteutils, @@ -38,6 +40,8 @@ template wrapTry(body: untyped) = return err(e.msg) except JsonRpcError as ex: return err(ex.msg) + except CatchableError as ex: + return err(ex.msg) template wrapTrySimpleRes(body: untyped) = wrapTry: @@ -65,6 +69,17 @@ proc forkchoiceUpdatedV3*(client: RpcClient, wrapTrySimpleRes: client.engine_forkchoiceUpdatedV3(update, payloadAttributes) +proc forkchoiceUpdated*(client: RpcClient, + version: Version, + update: ForkchoiceStateV1, + attr = Opt.none(PayloadAttributes)): + Result[ForkchoiceUpdatedResponse, string] = + case version + of Version.V1: return client.forkchoiceUpdatedV1(update, attr.V1) + of Version.V2: return client.forkchoiceUpdatedV2(update, attr) + of Version.V3: return client.forkchoiceUpdatedV3(update, attr) + of Version.V4: discard + proc getPayloadV1*(client: RpcClient, payloadId: Bytes8): Result[ExecutionPayloadV1, string] = wrapTrySimpleRes: client.engine_getPayloadV1(payloadId) @@ -82,8 +97,8 @@ proc getPayloadV4*(client: RpcClient, payloadId: Bytes8): Result[GetPayloadV4Res client.engine_getPayloadV4(payloadId) proc getPayload*(client: RpcClient, - payloadId: Bytes8, - version: Version): Result[GetPayloadResponse, string] = + version: Version, + payloadId: Bytes8): Result[GetPayloadResponse, string] = if version == Version.V4: let x = client.getPayloadV4(payloadId).valueOr: return err(error) @@ -117,27 +132,6 @@ proc getPayload*(client: RpcClient, executionPayload: executionPayload(x), )) -proc forkchoiceUpdated*(client: RpcClient, - update: ForkchoiceStateV1, - attr: PayloadAttributes): - Result[ForkchoiceUpdatedResponse, string] = - case attr.version - of Version.V1: return client.forkchoiceUpdatedV1(update, Opt.some attr.V1) - of Version.V2: return client.forkchoiceUpdatedV2(update, Opt.some attr) - of Version.V3: return client.forkchoiceUpdatedV3(update, Opt.some attr) - of Version.V4: discard - -proc forkchoiceUpdated*(client: RpcClient, - version: Version, - update: ForkchoiceStateV1, - attr = Opt.none(PayloadAttributes)): - Result[ForkchoiceUpdatedResponse, string] = - case version - of Version.V1: return client.forkchoiceUpdatedV1(update, attr.V1) - of Version.V2: return client.forkchoiceUpdatedV2(update, attr) - of Version.V3: return client.forkchoiceUpdatedV3(update, attr) - of Version.V4: discard - proc newPayloadV1*(client: RpcClient, payload: ExecutionPayloadV1): Result[PayloadStatusV1, string] = @@ -200,62 +194,12 @@ proc newPayloadV4*(client: RpcClient, payload: ExecutionPayload, versionedHashes: Opt[seq[VersionedHash]], parentBeaconBlockRoot: Opt[Hash32], - executionRequests: Opt[array[3, seq[byte]]] - ): + executionRequests: Opt[array[3, seq[byte]]]): Result[PayloadStatusV1, string] = wrapTrySimpleRes: client.engine_newPayloadV4(payload, versionedHashes, parentBeaconBlockRoot, executionRequests) -proc collectBlobHashes(list: openArray[Web3Tx]): seq[Hash32] = - for w3tx in list: - let tx = ethTx(w3tx) - for h in tx.versionedHashes: - result.add h - -proc newPayload*(client: RpcClient, - payload: ExecutionPayload, - beaconRoot = Opt.none(Hash32), - executionRequests = Opt.none(array[3, seq[byte]]) - ): Result[PayloadStatusV1, string] = - case payload.version - of Version.V1: return client.newPayloadV1(payload.V1) - of Version.V2: return client.newPayloadV2(payload.V2) - of Version.V3: - if beaconRoot.isNone: - # fallback - return client.newPayloadV2(payload.V2) - let versionedHashes = collectBlobHashes(payload.transactions) - return client.newPayloadV3(payload.V3, - versionedHashes, - beaconRoot.get) - of Version.V4: - let versionedHashes = collectBlobHashes(payload.transactions) - return client.newPayloadV4(payload.V3, - versionedHashes, - beaconRoot.get, - executionRequests.get) - -proc newPayload*(client: RpcClient, - version: Version, - payload: ExecutionPayload, - beaconRoot = Opt.none(Hash32), - executionRequests = Opt.none(array[3, seq[byte]])): Result[PayloadStatusV1, string] = - case version - of Version.V1: return client.newPayloadV1(payload) - of Version.V2: return client.newPayloadV2(payload) - of Version.V3: - let versionedHashes = collectBlobHashes(payload.transactions) - return client.newPayloadV3(payload, - Opt.some(versionedHashes), - beaconRoot) - of Version.V4: - let versionedHashes = collectBlobHashes(payload.transactions) - return client.newPayloadV4(payload, - Opt.some(versionedHashes), - beaconRoot, - executionRequests) - proc newPayload*(client: RpcClient, version: Version, payload: ExecutableData): Result[PayloadStatusV1, string] = @@ -325,12 +269,17 @@ proc toBlockHeader*(bc: BlockObject): Header = blobGasUsed : maybeU64(bc.blobGasUsed), excessBlobGas : maybeU64(bc.excessBlobGas), parentBeaconBlockRoot: bc.parentBeaconBlockRoot, + requestsHash : bc.requestsHash, ) func vHashes(x: Opt[seq[Hash32]]): seq[VersionedHash] = if x.isNone: return else: x.get +func authList(x: Opt[seq[AuthorizationObject]]): seq[Authorization] = + if x.isNone: return + else: ethAuthList x.get + proc toTransaction(tx: TransactionObject): Transaction = Transaction( txType : tx.`type`.get(0.Web3Quantity).TxType, @@ -349,6 +298,7 @@ proc toTransaction(tx: TransactionObject): Transaction = V : tx.v.uint64, R : tx.r, S : tx.s, + authorizationList: authList(tx.authorizationList), ) proc toTransactions*(txs: openArray[TxOrHash]): seq[Transaction] = @@ -416,6 +366,7 @@ type accessList*: Opt[seq[AccessPair]] maxFeePerBlobGas*: Opt[UInt256] versionedHashes*: Opt[seq[VersionedHash]] + authorizationList*: Opt[seq[Authorization]] proc toRPCReceipt(rec: ReceiptObject): RPCReceipt = RPCReceipt( @@ -464,6 +415,7 @@ proc toRPCTx(tx: eth_api.TransactionObject): RPCTx = Opt.some(vHashes tx.blobVersionedHashes) else: Opt.none(seq[VersionedHash]), + authorizationList: ethAuthList(tx.authorizationList), ) proc waitForTTD*(client: RpcClient, @@ -495,16 +447,6 @@ proc headerByNumber*(client: RpcClient, number: uint64): Result[Header, string] return err("failed to get blockHeader: " & $number) return ok(res.toBlockHeader) -#proc blockByNumber*(client: RpcClient, number: uint64, output: var Block): Result[void, string] = -# wrapTry: -# let res = waitFor client.eth_getBlockByNumber(blockId(number), true) -# if res.isNil: -# return err("failed to get block: " & $number) -# output.header = toBlockHeader(res) -# output.txs = toTransactions(res.transactions) -# output.withdrawals = toWithdrawals(res.withdrawals) -# return ok() - proc headerByHash*(client: RpcClient, hash: Hash32): Result[Header, string] = wrapTry: let res = waitFor client.eth_getBlockByHash(hash, false) @@ -633,7 +575,7 @@ createRpcSigsFromNim(RpcClient): proc debugPrevRandaoTransaction*( client: RpcClient, tx: PooledTransaction, - expectedPrevRandao: Hash32): Result[void, string] = + expectedPrevRandao: Bytes32): Result[void, string] = wrapTry: let hash = tx.rlpHash # we only interested in stack, disable all other elems @@ -660,7 +602,7 @@ proc debugPrevRandaoTransaction*( if stack.len < 1: return err("Invalid stack after PREVRANDAO operation") - let stackHash = Hash32(hextoByteArray[32](stack[0].getStr)) + let stackHash = Bytes32(hexToByteArray[32](stack[0].getStr)) if stackHash != expectedPrevRandao: return err("Invalid stack after PREVRANDAO operation $1 != $2" % [stackHash.data.toHex, expectedPrevRandao.data.toHex]) diff --git a/hive_integration/nodocker/engine/engine_env.nim b/hive_integration/nodocker/engine/engine_env.nim index 1b6d1e6dc7..eaef107a4f 100644 --- a/hive_integration/nodocker/engine/engine_env.nim +++ b/hive_integration/nodocker/engine/engine_env.nim @@ -29,7 +29,9 @@ import common ], ../../../tests/test_helpers, - web3/execution_types + web3/execution_types, + ./engine_client, + ./types from ./node import setBlock @@ -80,7 +82,7 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E let node = setupEthNode(conf, ctx) com = makeCom(conf) - head = com.db.getCanonicalHead() + head = com.db.getCanonicalHead().expect("canonical head exists") chain = newForkedChain(com, head) let txPool = TxPoolRef.new(com) @@ -109,7 +111,7 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E beaconEngine = BeaconEngineRef.new(txPool, chain) serverApi = newServerAPI(chain, txPool) - setupServerAPI(serverApi, server) + setupServerAPI(serverApi, server, ctx) setupEngineAPI(beaconEngine, server) # temporary disabled #setupDebugRpc(com, txPool, server) @@ -186,7 +188,9 @@ proc numTxsInPool*(env: EngineEnv): int = env.txPool.numTxs func version*(env: EngineEnv, time: EthTime): Version = - if env.com.isCancunOrLater(time): + if env.com.isPragueOrLater(time): + Version.V4 + elif env.com.isCancunOrLater(time): Version.V3 elif env.com.isShanghaiOrLater(time): Version.V2 @@ -203,3 +207,54 @@ proc setBlock*(env: EngineEnv, blk: common.EthBlock): bool = # env.chain.setBlock(blk).isOk() debugEcho "TODO: fix setBlock" false + +proc newPayload*(env: EngineEnv, + payload: ExecutableData): Result[PayloadStatusV1, string] = + let version = env.version(payload.basePayload.timestamp) + env.client.newPayload(version, payload) + +proc newPayload*(env: EngineEnv, + version: Version, + payload: ExecutableData): Result[PayloadStatusV1, string] = + env.client.newPayload(version, payload) + +proc getPayload*(env: EngineEnv, + timestamp: uint64 | Web3Quantity | EthTime, + payloadId: Bytes8): Result[GetPayloadResponse, string] = + let version = env.version(timestamp) + env.client.getPayload(version, payloadId) + +proc getPayload*(env: EngineEnv, + version: Version, + payloadId: Bytes8): Result[GetPayloadResponse, string] = + env.client.getPayload(version, payloadId) + +proc forkchoiceUpdated*(env: EngineEnv, + timestamp: uint64 | Web3Quantity | EthTime, + update: ForkchoiceStateV1, + attr = Opt.none(PayloadAttributes)): + Result[ForkchoiceUpdatedResponse, string] = + let version = env.version(timestamp) + env.client.forkchoiceUpdated(version, update, attr) + +proc forkchoiceUpdated*(env: EngineEnv, + timestamp: uint64 | Web3Quantity | EthTime, + update: ForkchoiceStateV1, + attr: PayloadAttributes): + Result[ForkchoiceUpdatedResponse, string] = + let version = env.version(timestamp) + env.client.forkchoiceUpdated(version, update, Opt.some(attr)) + +proc forkchoiceUpdated*(env: EngineEnv, + version: Version, + update: ForkchoiceStateV1, + attr = Opt.none(PayloadAttributes)): + Result[ForkchoiceUpdatedResponse, string] = + env.client.forkchoiceUpdated(version, update, attr) + +proc forkchoiceUpdated*(env: EngineEnv, + version: Version, + update: ForkchoiceStateV1, + attr: PayloadAttributes): + Result[ForkchoiceUpdatedResponse, string] = + env.client.forkchoiceUpdated(version, update, Opt.some(attr)) diff --git a/hive_integration/nodocker/engine/engine_sim.nim b/hive_integration/nodocker/engine/engine_sim.nim index faa0be2391..4100c8b95f 100644 --- a/hive_integration/nodocker/engine/engine_sim.nim +++ b/hive_integration/nodocker/engine/engine_sim.nim @@ -17,11 +17,11 @@ import ../../../nimbus/core/eip4844 import - # ./engine_tests, + ./engine_tests, ./auths_tests, - ./exchange_cap_tests#, - #./withdrawal_tests, - #./cancun_tests + ./exchange_cap_tests, + ./withdrawal_tests, + ./cancun_tests proc combineTests(): seq[TestDesc] = #result.add wdTestList diff --git a/hive_integration/nodocker/engine/helper.nim b/hive_integration/nodocker/engine/helper.nim index 6e7ed886a5..e66369c9d4 100644 --- a/hive_integration/nodocker/engine/helper.nim +++ b/hive_integration/nodocker/engine/helper.nim @@ -18,13 +18,12 @@ import proc txInPayload*(payload: ExecutionPayload, txHash: Hash32): bool = for txBytes in payload.transactions: - let currTx = rlp.decode(common.Blob txBytes, Transaction) + let currTx = rlp.decode(seq[byte](txBytes), Transaction) if rlpHash(currTx) == txHash: return true -proc checkPrevRandaoValue*(client: RpcClient, expectedPrevRandao: Hash32, blockNumber: uint64): bool = +proc checkPrevRandaoValue*(client: RpcClient, expectedPrevRandao: Bytes32, blockNumber: uint64): bool = let storageKey = blockNumber.u256 let r = client.storageAt(prevRandaoContractAddr, storageKey) - let expected = FixedBytes[32](expectedPrevRandao.data) - r.expectStorageEqual(expected) + r.expectStorageEqual(expectedPrevRandao) return true diff --git a/hive_integration/nodocker/engine/node.nim b/hive_integration/nodocker/engine/node.nim index 8109ba90c3..cb7f806e31 100644 --- a/hive_integration/nodocker/engine/node.nim +++ b/hive_integration/nodocker/engine/node.nim @@ -99,21 +99,17 @@ proc setBlock*(c: ChainRef; blk: Block): Result[void, string] = let vmState = c.getVmState(header).valueOr: return err("no vmstate") - stateRootChpt = vmState.parent.stateRoot # Check point + _ = vmState.parent.stateRoot # Check point ? vmState.processBlock(blk) - if not c.db.persistHeader( - header, c.com.proofOfStake(header), c.com.startOfHistory): - return err("Could not persist header") + ? c.db.persistHeader( + header, c.com.proofOfStake(header), c.com.startOfHistory) - try: - c.db.persistTransactions(header.number, header.txRoot, blk.transactions) - c.db.persistReceipts(header.receiptsRoot, vmState.receipts) + c.db.persistTransactions(header.number, header.txRoot, blk.transactions) + c.db.persistReceipts(header.receiptsRoot, vmState.receipts) - if blk.withdrawals.isSome: - c.db.persistWithdrawals(header.withdrawalsRoot.get, blk.withdrawals.get) - except CatchableError as exc: - return err(exc.msg) + if blk.withdrawals.isSome: + c.db.persistWithdrawals(header.withdrawalsRoot.get, blk.withdrawals.get) # update currentBlock *after* we persist it # so the rpc return consistent result diff --git a/hive_integration/nodocker/engine/tx_sender.nim b/hive_integration/nodocker/engine/tx_sender.nim index 5efc7b0de6..195a4a027c 100644 --- a/hive_integration/nodocker/engine/tx_sender.nim +++ b/hive_integration/nodocker/engine/tx_sender.nim @@ -52,10 +52,10 @@ type accounts: seq[TestAccount] nonceMap: Table[Address, uint64] txSent : int - chainId : ChainID + chainId : ChainId MakeTxParams* = object - chainId*: ChainID + chainId*: ChainId key* : PrivateKey nonce* : AccountNonce @@ -128,7 +128,7 @@ proc getTxType(tc: BaseTx, nonce: uint64): TxType = if nonce mod 2 == 0: TxLegacy else: - TxEIP1559 + TxEip1559 else: tc.txType.get @@ -157,7 +157,7 @@ proc makeTxOfType(params: MakeTxParams, tc: BaseTx): PooledTransaction = of TxEip1559: PooledTransaction( tx: Transaction( - txType : TxEIP1559, + txType : TxEip1559, nonce : params.nonce, gasLimit: tc.gasLimit, maxFeePerGas: gasFeeCap, @@ -182,7 +182,7 @@ proc makeTxOfType(params: MakeTxParams, tc: BaseTx): PooledTransaction = PooledTransaction( tx: Transaction( - txType : TxEIP4844, + txType : TxEip4844, nonce : params.nonce, chainId : params.chainId, maxFeePerGas: gasFeeCap, @@ -433,7 +433,7 @@ proc customizeTransaction*(sender: TxSender, if baseTx.txType in {TxEip1559, TxEip4844}: if custTx.gasPriceOrGasFeeCap.isSome: - modTx.maxFeePErGas = custTx.gasPriceOrGasFeeCap.get.GasInt + modTx.maxFeePerGas = custTx.gasPriceOrGasFeeCap.get.GasInt if custTx.gasTipCap.isSome: modTx.maxPriorityFeePerGas = custTx.gasTipCap.get.GasInt diff --git a/hive_integration/nodocker/engine/types.nim b/hive_integration/nodocker/engine/types.nim index 7d35ef16b9..37820f7ae8 100644 --- a/hive_integration/nodocker/engine/types.nim +++ b/hive_integration/nodocker/engine/types.nim @@ -57,10 +57,10 @@ type ExecutableData* = object basePayload* : ExecutionPayload - beaconRoot* : Opt[Hash32] - executionRequests*: Opt[array[3, seq[byte]]] attr* : PayloadAttributes + beaconRoot* : Opt[Hash32] versionedHashes*: Opt[seq[Hash32]] + executionRequests*: Opt[array[3, seq[byte]]] const DefaultTimeout* = 60 # seconds @@ -85,11 +85,14 @@ func timestampToBeaconRoot*(timestamp: Quantity): Hash32 = let h = sha2.sha256.digest(timestamp.uint64.toBytesBE) Hash32(h.data) +proc randomBytes*(_: type Bytes32): Bytes32 = + doAssert randomBytes(result.data) == 32 + proc randomBytes*(_: type Hash32): Hash32 = doAssert randomBytes(result.data) == 32 proc randomBytes*(_: type Address): Address = - doAssert randomBytes(result) == 20 + doAssert randomBytes(result.data) == 20 proc clone*[T](x: T): T = result = T() @@ -321,3 +324,27 @@ proc `parentHash=`*(x: var ExecutableData, val: auto) = proc `blockHash=`*(x: var ExecutableData, val: auto) = x.basePayload.blockHash = val + +proc collectBlobHashes*(list: openArray[Web3Tx]): seq[Hash32] = + for w3tx in list: + let tx = ethTx(w3tx) + for h in tx.versionedHashes: + result.add h + +func toExecutableData*(res: GetPayloadResponse, attr: PayloadAttributes): ExecutableData = + ExecutableData( + basePayload: res.executionPayload, + attr: attr, + beaconRoot: attr.parentBeaconBlockRoot, + versionedHashes: Opt.some(collectBlobHashes(res.executionPayload.transactions)), + executionRequests: res.executionRequests, + ) + +func toExecutableData*(payload: ExecutionPayload, attr: PayloadAttributes): ExecutableData = + ExecutableData( + basePayload: payload, + attr: attr, + beaconRoot: attr.parentBeaconBlockRoot, + versionedHashes: Opt.some(collectBlobHashes(payload.transactions)), + ) + \ No newline at end of file diff --git a/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim b/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim index 847832973a..f097006630 100644 --- a/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim +++ b/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim @@ -12,7 +12,6 @@ import stint, chronicles, chronos, - stew/byteutils, web3/eth_api_types, ./wd_history, ../test_env, @@ -42,8 +41,8 @@ type nextIndex*: int const - WARM_COINBASE_ADDRESS = hexToByteArray[20]("0x0101010101010101010101010101010101010101") - PUSH0_ADDRESS = hexToByteArray[20]("0x0202020202020202020202020202020202020202") + WARM_COINBASE_ADDRESS = address"0x0101010101010101010101010101010101010101" + PUSH0_ADDRESS = address"0x0202020202020202020202020202020202020202" MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK* = 16 TX_CONTRACT_ADDRESSES = [ WARM_COINBASE_ADDRESS, @@ -247,7 +246,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool = # withdrawals before Shanghai var r = env.client.forkchoiceUpdatedV2( ForkchoiceStateV1( - headBlockHash: env.clMock.latestHeader, + headBlockHash: env.clMock.latestHeader.blockHash, ), Opt.some(PayloadAttributes( timestamp: w3Qty(env.clMock.latestHeader.timestamp, ws.getBlockTimeIncrements()), @@ -263,7 +262,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool = # (clMock uses V1 by default) r = env.client.forkchoiceUpdatedV2( ForkchoiceStateV1( - headBlockHash: env.clMock.latestHeader, + headBlockHash: env.clMock.latestHeader.blockHash, ), Opt.some(PayloadAttributes( timestamp: w3Qty(env.clMock.latestHeader.timestamp, ws.getBlockTimeIncrements()), @@ -335,7 +334,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool = # Shanghai let r = env.client.forkchoiceUpdatedV2( ForkchoiceStateV1( - headBlockHash: env.clMock.latestHeader, + headBlockHash: env.clMock.latestHeader.blockHash, ), Opt.some(PayloadAttributes( timestamp: w3Qty(env.clMock.latestHeader.timestamp, ws.getBlockTimeIncrements()), diff --git a/hive_integration/nodocker/engine/withdrawals/wd_history.nim b/hive_integration/nodocker/engine/withdrawals/wd_history.nim index 783dd1e8bc..d4140edcf6 100644 --- a/hive_integration/nodocker/engine/withdrawals/wd_history.nim +++ b/hive_integration/nodocker/engine/withdrawals/wd_history.nim @@ -12,7 +12,6 @@ import std/[tables, sets, strutils], eth/common/eth_types as common, json_rpc/[rpcclient], - stew/byteutils, results, ../engine_client, ../../../../nimbus/utils/utils, diff --git a/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim b/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim index 0e761ace14..1c719c7f48 100644 --- a/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim +++ b/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim @@ -20,7 +20,8 @@ import ../engine_client, ../types, ../base_spec, - ../../../../nimbus/beacon/web3_eth_conv + ../../../../nimbus/beacon/web3_eth_conv, + ../../../../nimbus/utils/utils # Withdrawals re-org spec: # Specifies a withdrawals test where the withdrawals re-org can happen @@ -38,8 +39,8 @@ type startAccount: UInt256 nextIndex : int wdHistory : WDHistory - sidechain : Table[uint64, ExecutionPayload] - payloadId : PayloadID + sidechain : Table[uint64, ExecutableData] + payloadId : Bytes8 height : uint64 attr : Opt[PayloadAttributes] @@ -91,7 +92,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool = startAccount: 1.u256 shl 160, nextIndex : 0, wdHistory : WDHistory(), - sidechain : Table[uint64, ExecutionPayload]() + sidechain : Table[uint64, ExecutableData]() ) # Sidechain withdraws on the max account value range 0xffffffffffffffffffffffffffffffffffffffff @@ -176,38 +177,39 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool = number=env.clMock.currentPayloadNumber sidechain.attr = Opt.some(attr) - let r = sec.client.forkchoiceUpdated(fcState, attr) + let r = sec.forkchoiceUpdated(attr.timestamp, fcState, attr) r.expectNoError() r.expectPayloadStatus(PayloadExecutionStatus.valid) - testCond r.get().payloadID.isSome: + testCond r.get().payloadId.isSome: error "Unable to get a payload ID on the sidechain" - sidechain.payloadId = r.get().payloadID.get() + sidechain.payloadId = r.get().payloadId.get() return true , onGetPayload: proc(): bool = var - payload: ExecutionPayload + payload: ExecutableData if env.clMock.latestPayloadBuilt.blockNumber.uint64 >= ws.getSidechainSplitHeight().uint64: # This payload is built by the secondary client, hence need to manually fetch it here doAssert(sidechain.attr.isSome) - let version = sidechain.attr.get().version - let r = sec.client.getPayload(sidechain.payloadId, version) + let attr = sidechain.attr.get() + let timeVer = attr.timestamp + let r = sec.getPayload(timeVer, sidechain.payloadId) r.expectNoError() - payload = r.get().executionPayload + payload = r.get().toExecutableData(attr) sidechain.sidechain[payload.blockNumber.uint64] = payload else: # This block is part of both chains, simply forward it to the secondary client - payload = env.clMock.latestPayloadBuilt + payload = env.clMock.latestExecutedPayload - let r = sec.client.newPayload(payload) + let r = sec.newPayload(payload) r.expectStatus(PayloadExecutionStatus.valid) let fcState = ForkchoiceStateV1( headBlockHash: payload.blockHash, ) - let p = sec.client.forkchoiceUpdated(payload.version, fcState) + let p = sec.forkchoiceUpdated(payload.timestamp, fcState) p.expectPayloadStatus(PayloadExecutionStatus.valid) return true )) @@ -246,7 +248,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool = let r = sec.client.forkchoiceUpdatedV2(fcState, Opt.some(attr)) r.expectPayloadStatus(PayloadExecutionStatus.valid) - let p = sec.client.getPayloadV2(r.get().payloadID.get) + let p = sec.client.getPayloadV2(r.get().payloadId.get) p.expectNoError() let z = p.get() @@ -259,7 +261,8 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool = q.expectPayloadStatus(PayloadExecutionStatus.valid) inc sidechain.height - sidechain.sidechain[sidechain.height] = executionPayload(z.executionPayload) + let tmp = executionPayload(z.executionPayload) + sidechain.sidechain[sidechain.height] = tmp.toExecutableData(attr) # Check the withdrawals on the latest let res = ws.wdHistory.verifyWithdrawals(sidechain.height, Opt.none(uint64), env.client) @@ -278,7 +281,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool = ws.timeoutSeconds = DefaultTimeout while loop < ws.timeoutSeconds: - let r = env.client.newPayloadV2(payload.V2) + let r = env.client.newPayloadV2(payload.basePayload.V2) r.expectNoError() let fcState = ForkchoiceStateV1(headBlockHash: sideHash) let p = env.client.forkchoiceUpdatedV2(fcState) @@ -312,11 +315,11 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool = hash=payload.blockHash.short, parentHash=payload.parentHash.short - let r = env.client.newPayload(payload) + let r = env.engine.newPayload(version, payload) r.expectStatusEither([PayloadExecutionStatus.valid, PayloadExecutionStatus.accepted]) let fcState = ForkchoiceStateV1(headBlockHash: payload.blockHash) - let p = env.client.forkchoiceUpdated(version, fcState) + let p = env.engine.forkchoiceUpdated(version, fcState) p.expectPayloadStatus(PayloadExecutionStatus.valid) inc payloadNumber diff --git a/hive_integration/nodocker/pyspec/pyspec_sim.nim b/hive_integration/nodocker/pyspec/pyspec_sim.nim index 575d3ff7b1..f0e0a41890 100644 --- a/hive_integration/nodocker/pyspec/pyspec_sim.nim +++ b/hive_integration/nodocker/pyspec/pyspec_sim.nim @@ -20,6 +20,7 @@ import ../../../nimbus/beacon/payload_conv, ../../../nimbus/core/eip4844, ../engine/engine_client, + ../engine/types, ./test_env const @@ -35,8 +36,7 @@ const type Payload = object badBlock: bool - payload: ExecutionPayload - beaconRoot: Opt[Hash32] + payload: ExecutableData proc getPayload(node: JsonNode): Payload = try: @@ -45,8 +45,10 @@ proc getPayload(node: JsonNode): Payload = blk = rlp.decode(rlpBytes, EthBlock) Payload( badBlock: false, - payload: executionPayload(blk), - beaconRoot: blk.header.parentBeaconBlockRoot, + payload: ExecutableData( + basePayload: executionPayload(blk), + beaconRoot: blk.header.parentBeaconBlockRoot, + ) ) except RlpError: Payload( @@ -141,7 +143,7 @@ proc runTest(node: JsonNode, network: string): TestStatus = latestVersion = payload.payload.version - let res = env.rpcClient.newPayload(payload.payload, payload.beaconRoot) + let res = env.rpcClient.newPayload(latestVersion, payload.payload) if res.isErr: result = TestStatus.Failed echo "unable to send block ", diff --git a/hive_integration/nodocker/pyspec/test_env.nim b/hive_integration/nodocker/pyspec/test_env.nim index e8dd7012f5..c6f308d08a 100644 --- a/hive_integration/nodocker/pyspec/test_env.nim +++ b/hive_integration/nodocker/pyspec/test_env.nim @@ -13,7 +13,6 @@ import stew/[byteutils], json_rpc/[rpcserver, rpcclient], ../../../nimbus/[ - config, constants, transaction, db/ledger, @@ -47,10 +46,10 @@ proc setupELClient*(conf: ChainConfig, node: JsonNode): TestEnv = stateDB.persist() doAssert stateDB.getStateRoot == genesisHeader.stateRoot - doAssert com.db.persistHeader(genesisHeader, - com.proofOfStake(genesisHeader)) - doAssert(com.db.getCanonicalHead().blockHash == - genesisHeader.blockHash) + com.db.persistHeader(genesisHeader, + com.proofOfStake(genesisHeader)).expect("persistHeader no error") + let head = com.db.getCanonicalHead().expect("canonical head exists") + doAssert(head.blockHash == genesisHeader.blockHash) let txPool = TxPoolRef.new(com) @@ -59,7 +58,7 @@ proc setupELClient*(conf: ChainConfig, node: JsonNode): TestEnv = rpcServer = newRpcHttpServer(["127.0.0.1:0"]) rpcClient = newRpcHttpClient() - setupServerAPI(serverApi, rpcServer) + setupServerAPI(serverApi, rpcServer, newEthContext()) setupEngineAPI(beaconEngine, rpcServer) rpcServer.start() diff --git a/hive_integration/nodocker/rpc/client.nim b/hive_integration/nodocker/rpc/client.nim index 44086ea2aa..c8bf600dd9 100644 --- a/hive_integration/nodocker/rpc/client.nim +++ b/hive_integration/nodocker/rpc/client.nim @@ -43,11 +43,6 @@ proc nonceAt*(client: RpcClient, address: Address): Future[AccountNonce] {.async let hex = await client.eth_getTransactionCount(address, blockId("latest")) result = hex.AccountNonce -func toTopics(list: openArray[Hash32]): seq[eth_types.Topic] = - result = newSeqOfCap[eth_types.Topic](list.len) - for x in list: - result.add eth_types.Topic(x) - func toLogs(list: openArray[LogObject]): seq[Log] = result = newSeqOfCap[Log](list.len) for x in list: diff --git a/hive_integration/nodocker/rpc/test_env.nim b/hive_integration/nodocker/rpc/test_env.nim index 0d5237d4c3..4019b16202 100644 --- a/hive_integration/nodocker/rpc/test_env.nim +++ b/hive_integration/nodocker/rpc/test_env.nim @@ -16,8 +16,7 @@ import ../../../nimbus/common, ../../../nimbus/config, ../../../nimbus/rpc, - ../../../nimbus/rpc/oracle, - ../../../nimbus/rpc/p2p, + ../../../nimbus/rpc/server_api, ../../../nimbus/utils/utils, ../../../nimbus/core/[chain, tx_pool], ../../../tests/test_helpers, @@ -35,7 +34,7 @@ type const initPath = "hive_integration" / "nodocker" / "rpc" / "init" gasPrice* = 30.gwei - chainID* = ChainID(7) + chainID* = ChainId(7) proc manageAccounts(ctx: EthContext, conf: NimbusConf) = if string(conf.importKey).len > 0: @@ -46,11 +45,14 @@ proc manageAccounts(ctx: EthContext, conf: NimbusConf) = proc setupRpcServer(ctx: EthContext, com: CommonRef, ethNode: EthereumNode, txPool: TxPoolRef, - conf: NimbusConf): RpcServer = - let rpcServer = newRpcHttpServer([initTAddress(conf.httpAddress, conf.httpPort)]) - let oracle = Oracle.new(com) + conf: NimbusConf, chain: ForkedChainRef): RpcServer = + let + rpcServer = newRpcHttpServer([initTAddress(conf.httpAddress, conf.httpPort)]) + serverApi = newServerAPI(chain, txPool) + + setupCommonRpc(ethNode, conf, rpcServer) - setupEthRpc(ethNode, ctx, com, txPool, oracle, rpcServer) + setupServerAPI(serverApi, rpcServer, ctx) rpcServer.start() rpcServer @@ -84,7 +86,7 @@ proc setupEnv*(): TestEnv = manageAccounts(ethCtx, conf) - let head = com.db.getCanonicalHead() + let head = com.db.getCanonicalHead().expect("canonical head exists") let chainRef = newForkedChain(com, head) let txPool = TxPoolRef.new(com) @@ -92,7 +94,7 @@ proc setupEnv*(): TestEnv = # so it can know the latest account state doAssert txPool.smartHead(head, chainRef) - let rpcServer = setupRpcServer(ethCtx, com, ethNode, txPool, conf) + let rpcServer = setupRpcServer(ethCtx, com, ethNode, txPool, conf, chainRef) let rpcClient = newRpcHttpClient() waitFor rpcClient.connect("127.0.0.1", Port(8545), false) let stopServer = stopRpcHttpServer diff --git a/hive_integration/nodocker/rpc/vault.nim b/hive_integration/nodocker/rpc/vault.nim index f21198c055..ba09da2a10 100644 --- a/hive_integration/nodocker/rpc/vault.nim +++ b/hive_integration/nodocker/rpc/vault.nim @@ -42,12 +42,12 @@ type accounts: Table[Address, PrivateKey] rng: ref HmacDrbgContext - chainId: ChainID + chainId: ChainId gasPrice: GasInt vaultKey: PrivateKey client: RpcClient -proc newVault*(chainID: ChainID, gasPrice: GasInt, client: RpcClient): Vault = +proc newVault*(chainID: ChainId, gasPrice: GasInt, client: RpcClient): Vault = new(result) result.rng = newRng() result.chainId = chainID diff --git a/kurtosis-network-params.yml b/kurtosis-network-params.yml index af4052d4fb..61d2455c7a 100644 --- a/kurtosis-network-params.yml +++ b/kurtosis-network-params.yml @@ -14,7 +14,7 @@ participants: el_extra_params: ["--log-level=DEBUG"] cl_type: nimbus cl_image: statusim/nimbus-eth2:multiarch-latest - cl_extra_params: ["--log-level=DEBUG;INFO:gossip_eth2,attpool,libp2p,gossipsub,pubsubpeer,pubsub,switch,networking,sync,dialer,identify,syncman,connmanager,beacnde,lightcl,requman,gossip_lc,clearance,lpstream,mplexchannel,nodes-verification,tcptransport,chaindag,noise,eth,p2p,discv5,muxedupgrade,multistream,connection,secure,fee_recipient,mplex,syncpool,multiaddress,peer_proto;WARN:message_router"] + cl_extra_params: ["--log-level=DEBUG"] use_separate_vc: false additional_services: - tx_spammer @@ -23,7 +23,7 @@ additional_services: - blob_spammer mev_type: null assertoor_params: - image: "ethpandaops/assertoor:latest" + image: "ethpandaops/assertoor" run_stability_check: false run_block_proposal_check: true run_transaction_test: true diff --git a/nimbus/beacon/api_handler/api_exchangeconf.nim b/nimbus/beacon/api_handler/api_exchangeconf.nim index 2a34a741a7..0b54bc50ab 100644 --- a/nimbus/beacon/api_handler/api_exchangeconf.nim +++ b/nimbus/beacon/api_handler/api_exchangeconf.nim @@ -43,20 +43,17 @@ proc exchangeConf*(ben: BeaconEngineRef, terminalBlockHash = conf.terminalBlockHash if terminalBlockHash != default(Hash32): - var headerHash: Hash32 - - if not db.getBlockHash(terminalBlockNumber, headerHash): - raise newException(ValueError, "cannot get terminal block hash, number $1" % - [$terminalBlockNumber]) + let headerHash = db.getBlockHash(terminalBlockNumber).valueOr: + raise newException(ValueError, "cannot get terminal block hash, number $1, msg: $2" % + [$terminalBlockNumber, error]) if terminalBlockHash != headerHash: raise newException(ValueError, "invalid terminal block hash, got $1 want $2" % [$terminalBlockHash, $headerHash]) - var header: Header - if not db.getBlockHeader(headerHash, header): - raise newException(ValueError, "cannot get terminal block header, hash $1" % - [$terminalBlockHash]) + let header = db.getBlockHeader(headerHash).valueOr: + raise newException(ValueError, "cannot get terminal block header, hash $1, msg: $2" % + [$terminalBlockHash, error]) return TransitionConfigurationV1( terminalTotalDifficulty: ttd.get, diff --git a/nimbus/beacon/api_handler/api_forkchoice.nim b/nimbus/beacon/api_handler/api_forkchoice.nim index 23af4c4192..d88ad4c950 100644 --- a/nimbus/beacon/api_handler/api_forkchoice.nim +++ b/nimbus/beacon/api_handler/api_forkchoice.nim @@ -124,11 +124,12 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, if apiVersion == Version.V1: let blockNumber = header.number if header.difficulty > 0.u256 or blockNumber == 0'u64: - var - td, ptd: DifficultyInt + let + td = db.getScore(blockHash) + ptd = db.getScore(header.parentHash) ttd = com.ttd.get(high(UInt256)) - if not db.getTd(blockHash, td) or (blockNumber > 0'u64 and not db.getTd(header.parentHash, ptd)): + if td.isNone or (blockNumber > 0'u64 and ptd.isNone): error "TDs unavailable for TTD check", number = blockNumber, hash = blockHash.short, @@ -137,12 +138,12 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, ptd = ptd return simpleFCU(PayloadExecutionStatus.invalid, "TDs unavailable for TTD check") - if td < ttd or (blockNumber > 0'u64 and ptd > ttd): + if td.get < ttd or (blockNumber > 0'u64 and ptd.get > ttd): notice "Refusing beacon update to pre-merge", number = blockNumber, hash = blockHash.short, diff = header.difficulty, - ptd = ptd, + ptd = ptd.get, ttd = ttd return invalidFCU("Refusing beacon update to pre-merge") @@ -176,7 +177,7 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, db.safeHeaderHash(safeBlockHash) chain.forkChoice(blockHash, finalizedBlockHash).isOkOr: - return invalidFCU(error, com, header) + return invalidFCU(error, chain, header) # If payload generation was requested, create a new block to be potentially # sealed by the beacon client. The payload will be requested later, and we @@ -185,23 +186,19 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef, let attrs = attrsOpt.get() validateVersion(attrs, com, apiVersion) - let bundle = ben.generatePayload(attrs).valueOr: + let bundle = ben.generateExecutionBundle(attrs).valueOr: error "Failed to create sealing payload", err = error raise invalidAttr(error) let id = computePayloadId(blockHash, attrs) - ben.put(id, - bundle.blockValue, - bundle.executionPayload, - bundle.blobsBundle, - bundle.executionRequests) + ben.put(id, bundle) info "Created payload for block proposal", - number = bundle.executionPayload.blockNumber, - hash = bundle.executionPayload.blockHash.short, - txs = bundle.executionPayload.transactions.len, - gasUsed = bundle.executionPayload.gasUsed, - blobGasUsed = bundle.executionPayload.blobGasUsed.get(Quantity(0)), + number = bundle.payload.blockNumber, + hash = bundle.payload.blockHash.short, + txs = bundle.payload.transactions.len, + gasUsed = bundle.payload.gasUsed, + blobGasUsed = bundle.payload.blobGasUsed.get(Quantity(0)), id = id.toHex, attrs = attrs diff --git a/nimbus/beacon/api_handler/api_getbodies.nim b/nimbus/beacon/api_handler/api_getbodies.nim index 457290e7b6..ee0a9b4b39 100644 --- a/nimbus/beacon/api_handler/api_getbodies.nim +++ b/nimbus/beacon/api_handler/api_getbodies.nim @@ -13,7 +13,6 @@ import ../web3_eth_conv, ../beacon_engine, web3/execution_types, - ../../db/core_db, ./api_utils {.push gcsafe, raises:[CatchableError].} @@ -21,33 +20,6 @@ import const maxBodyRequest = 32 -proc getPayloadBodyByHeader(db: CoreDbRef, - header: Header, - output: var seq[Opt[ExecutionPayloadBodyV1]]) {.raises:[].} = - - var body: BlockBody - if not db.getBlockBody(header, body): - output.add Opt.none(ExecutionPayloadBodyV1) - return - - let txs = w3Txs body.transactions - var wds: seq[WithdrawalV1] - if body.withdrawals.isSome: - for w in body.withdrawals.get: - wds.add w3Withdrawal(w) - - output.add( - Opt.some(ExecutionPayloadBodyV1( - transactions: txs, - # pre Shanghai block return null withdrawals - # post Shanghai block return at least empty slice - withdrawals: if header.withdrawalsRoot.isSome: - Opt.some(wds) - else: - Opt.none(seq[WithdrawalV1]) - )) - ) - func toPayloadBody(blk: Block): ExecutionPayloadBodyV1 {.raises:[].} = var wds: seq[WithdrawalV1] if blk.withdrawals.isSome: @@ -88,12 +60,8 @@ proc getPayloadBodiesByRange*(ben: BeaconEngineRef, if count > maxBodyRequest: raise tooLargeRequest("request exceeds max allowed " & $maxBodyRequest) - let - db = ben.com.db - var last = start+count-1 - header: Header if start > ben.chain.latestNumber: # requested range beyond the latest known block @@ -104,10 +72,10 @@ proc getPayloadBodiesByRange*(ben: BeaconEngineRef, # get bodies from database for bn in start..ben.chain.baseNumber: - if not db.getBlockHeader(bn, header): + let blk = ben.chain.blockByNumber(bn).valueOr: result.add Opt.none(ExecutionPayloadBodyV1) continue - db.getPayloadBodyByHeader(header, result) + result.add Opt.some(blk.toPayloadBody) if last > ben.chain.baseNumber: let blocks = ben.chain.blockFromBaseTo(last) diff --git a/nimbus/beacon/api_handler/api_getpayload.nim b/nimbus/beacon/api_handler/api_getpayload.nim index c4aaf99e00..6a3dfb9e5f 100644 --- a/nimbus/beacon/api_handler/api_getpayload.nim +++ b/nimbus/beacon/api_handler/api_getpayload.nim @@ -23,51 +23,46 @@ proc getPayload*(ben: BeaconEngineRef, trace "Engine API request received", meth = "GetPayload", id - var payloadGeneric: ExecutionPayload - var blockValue: UInt256 - var blobsBundle: Opt[BlobsBundleV1] - if not ben.get(id, blockValue, payloadGeneric, blobsBundle): - raise unknownPayload("Unknown payload") + var bundle: ExecutionBundle + if not ben.get(id, bundle): + raise unknownPayload("Unknown bundle") - let version = payloadGeneric.version + let version = bundle.payload.version if version > expectedVersion: raise unsupportedFork("getPayload" & $expectedVersion & - " expect ExecutionPayload" & $expectedVersion & - " but get ExecutionPayload" & $version) - if blobsBundle.isSome: + " expect payload" & $expectedVersion & + " but get payload" & $version) + if bundle.blobsBundle.isSome: raise unsupportedFork("getPayload" & $expectedVersion & " contains unsupported BlobsBundleV1") GetPayloadV2Response( - executionPayload: payloadGeneric.V1V2, - blockValue: blockValue + executionPayload: bundle.payload.V1V2, + blockValue: bundle.blockValue ) proc getPayloadV3*(ben: BeaconEngineRef, id: Bytes8): GetPayloadV3Response = trace "Engine API request received", meth = "GetPayload", id - var payloadGeneric: ExecutionPayload - var blockValue: UInt256 - var blobsBundle: Opt[BlobsBundleV1] - if not ben.get(id, blockValue, payloadGeneric, blobsBundle): - raise unknownPayload("Unknown payload") + var bundle: ExecutionBundle + if not ben.get(id, bundle): + raise unknownPayload("Unknown bundle") - let version = payloadGeneric.version + let version = bundle.payload.version if version != Version.V3: - raise unsupportedFork("getPayloadV3 expect ExecutionPayloadV3 but get ExecutionPayload" & $version) - if blobsBundle.isNone: + raise unsupportedFork("getPayloadV3 expect payloadV3 but get payload" & $version) + if bundle.blobsBundle.isNone: raise unsupportedFork("getPayloadV3 is missing BlobsBundleV1") - let payload = payloadGeneric.V3 let com = ben.com - if not com.isCancunOrLater(ethTime payload.timestamp): - raise unsupportedFork("payload timestamp is less than Cancun activation") + if not com.isCancunOrLater(ethTime bundle.payload.timestamp): + raise unsupportedFork("bundle timestamp is less than Cancun activation") GetPayloadV3Response( - executionPayload: payload, - blockValue: blockValue, - blobsBundle: blobsBundle.get, + executionPayload: bundle.payload.V3, + blockValue: bundle.blockValue, + blobsBundle: bundle.blobsBundle.get, shouldOverrideBuilder: false ) @@ -75,30 +70,26 @@ proc getPayloadV4*(ben: BeaconEngineRef, id: Bytes8): GetPayloadV4Response = trace "Engine API request received", meth = "GetPayload", id - var payloadGeneric: ExecutionPayload - var blockValue: UInt256 - var blobsBundle: Opt[BlobsBundleV1] - var executionRequests: Opt[array[3, seq[byte]]] - if not ben.get(id, blockValue, payloadGeneric, blobsBundle, executionRequests): - raise unknownPayload("Unknown payload") + var bundle: ExecutionBundle + if not ben.get(id, bundle): + raise unknownPayload("Unknown bundle") - let version = payloadGeneric.version + let version = bundle.payload.version if version != Version.V3: - raise unsupportedFork("getPayloadV4 expect ExecutionPayloadV3 but get ExecutionPayload" & $version) - if blobsBundle.isNone: + raise unsupportedFork("getPayloadV4 expect payloadV3 but get payload" & $version) + if bundle.blobsBundle.isNone: raise unsupportedFork("getPayloadV4 is missing BlobsBundleV1") - if executionRequests.isNone: + if bundle.executionRequests.isNone: raise unsupportedFork("getPayloadV4 is missing executionRequests") - let payload = payloadGeneric.V3 let com = ben.com - if not com.isPragueOrLater(ethTime payload.timestamp): - raise unsupportedFork("payload timestamp is less than Prague activation") + if not com.isPragueOrLater(ethTime bundle.payload.timestamp): + raise unsupportedFork("bundle timestamp is less than Prague activation") GetPayloadV4Response( - executionPayload: payload, - blockValue: blockValue, - blobsBundle: blobsBundle.get, + executionPayload: bundle.payload.V3, + blockValue: bundle.blockValue, + blobsBundle: bundle.blobsBundle.get, shouldOverrideBuilder: false, - executionRequests: executionRequests.get, + executionRequests: bundle.executionRequests.get, ) diff --git a/nimbus/beacon/api_handler/api_newpayload.nim b/nimbus/beacon/api_handler/api_newpayload.nim index 17e88b2018..e06e73db3c 100644 --- a/nimbus/beacon/api_handler/api_newpayload.nim +++ b/nimbus/beacon/api_handler/api_newpayload.nim @@ -192,13 +192,18 @@ proc newPayload*(ben: BeaconEngineRef, hash = blockHash, number = header.number let vres = ben.chain.importBlock(blk) if vres.isErr: + warn "Error importing block", + number = header.number, + hash = blockHash.short, + parent = header.parentHash.short, + error = vres.error() ben.setInvalidAncestor(header, blockHash) let blockHash = latestValidHash(db, parent, ttd) return invalidStatus(blockHash, vres.error()) info "New payload received and validated", - number = header.number, - hash = blockHash.short, + number = header.number, + hash = blockHash.short, parent = header.parentHash.short, txs = blk.transactions.len, gasUsed = header.gasUsed, diff --git a/nimbus/beacon/api_handler/api_utils.nim b/nimbus/beacon/api_handler/api_utils.nim index 5717004f2c..5c1d2cef66 100644 --- a/nimbus/beacon/api_handler/api_utils.nim +++ b/nimbus/beacon/api_handler/api_utils.nim @@ -9,7 +9,7 @@ import std/[typetraits, strutils], - eth/rlp, + web3/execution_types, json_rpc/errors, nimcrypto/sha2, stew/endians2, @@ -18,7 +18,7 @@ import ../../db/core_db, ../../utils/utils, ../../common/common, - web3/execution_types, + ../../core/chain, ../web3_eth_conv {.push gcsafe, raises:[].} @@ -48,10 +48,10 @@ proc computePayloadId*(blockHash: common.Hash32, (distinctBase result)[0..7] = dest.data[0..7] proc validateBlockHash*(header: common.Header, - gotHash: common.Hash32, + wantHash: common.Hash32, version: Version): Result[void, PayloadStatusV1] {.gcsafe, raises: [ValueError].} = - let wantHash = header.blockHash + let gotHash = header.blockHash if wantHash != gotHash: let status = if version == Version.V1: PayloadExecutionStatus.invalid_block_hash @@ -173,28 +173,25 @@ proc tooLargeRequest*(msg: string): ref InvalidRequest = ) proc latestValidHash*(db: CoreDbRef, - parent: common.Header, - ttd: DifficultyInt): common.Hash32 = + parent: Header, + ttd: DifficultyInt): Hash32 = if parent.isGenesis: - return default(common.Hash32) + return default(Hash32) let ptd = db.getScore(parent.parentHash).valueOr(0.u256) if ptd >= ttd: parent.blockHash else: # If the most recent valid ancestor is a PoW block, # latestValidHash MUST be set to ZERO - default(common.Hash32) + default(Hash32) proc invalidFCU*(validationError: string, - com: CommonRef, - header: common.Header): ForkchoiceUpdatedResponse = - var parent: common.Header - if not com.db.getBlockHeader(header.parentHash, parent): + chain: ForkedChainRef, + header: Header): ForkchoiceUpdatedResponse = + let parent = chain.headerByHash(header.parentHash).valueOr: return invalidFCU(validationError) - let blockHash = try: - latestValidHash(com.db, parent, com.ttd.get(high(UInt256))) - except RlpError: - default(common.Hash32) + let blockHash = + latestValidHash(chain.db, parent, chain.com.ttd.get(high(UInt256))) invalidFCU(validationError, blockHash) diff --git a/nimbus/beacon/beacon_engine.nim b/nimbus/beacon/beacon_engine.nim index 87b66e477c..a26128149b 100644 --- a/nimbus/beacon/beacon_engine.nim +++ b/nimbus/beacon/beacon_engine.nim @@ -9,18 +9,18 @@ import std/[sequtils, tables], - ./web3_eth_conv, - ./payload_conv, + eth/common/[hashes, headers], chronicles, web3/execution_types, + ./web3_eth_conv, + ./payload_conv, ./payload_queue, ./api_handler/api_utils, - ../db/core_db, - ../core/[tx_pool, casper, chain], - eth/common/[hashes, headers] + ../core/[tx_pool, casper, chain] export - chain + chain, + ExecutionBundle type BeaconEngineRef* = ref object @@ -117,28 +117,8 @@ func put*(ben: BeaconEngineRef, ben.queue.put(hash, header) func put*(ben: BeaconEngineRef, id: Bytes8, - blockValue: UInt256, payload: ExecutionPayload, - blobsBundle: Opt[BlobsBundleV1]) = - ben.queue.put(id, blockValue, payload, blobsBundle) - -func put*(ben: BeaconEngineRef, id: Bytes8, - blockValue: UInt256, payload: ExecutionPayload, - blobsBundle: Opt[BlobsBundleV1], - executionRequests: Opt[array[3, seq[byte]]]) = - ben.queue.put(id, blockValue, payload, blobsBundle, executionRequests) - -func put*(ben: BeaconEngineRef, id: Bytes8, - blockValue: UInt256, payload: SomeExecutionPayload, - blobsBundle: Opt[BlobsBundleV1]) = - doAssert blobsBundle.isNone == (payload is - ExecutionPayloadV1 | ExecutionPayloadV2) - ben.queue.put(id, blockValue, payload, blobsBundle) - -func put*(ben: BeaconEngineRef, id: Bytes8, - blockValue: UInt256, - payload: ExecutionPayloadV1 | ExecutionPayloadV2) = - ben.queue.put( - id, blockValue, payload, blobsBundle = Opt.none(BlobsBundleV1)) + payload: ExecutionBundle) = + ben.queue.put(id, payload) # ------------------------------------------------------------------------------ # Public functions, getters @@ -154,52 +134,15 @@ func get*(ben: BeaconEngineRef, hash: Hash32, ben.queue.get(hash, header) func get*(ben: BeaconEngineRef, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayload, - blobsBundle: var Opt[BlobsBundleV1]): bool = - ben.queue.get(id, blockValue, payload, blobsBundle) - -func get*(ben: BeaconEngineRef, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayload, - blobsBundle: var Opt[BlobsBundleV1], - executionRequests: var Opt[array[3, seq[byte]]]): bool = - ben.queue.get(id, blockValue, payload, blobsBundle, executionRequests) - -func get*(ben: BeaconEngineRef, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayloadV1): bool = - ben.queue.get(id, blockValue, payload) - -func get*(ben: BeaconEngineRef, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayloadV2): bool = - ben.queue.get(id, blockValue, payload) - -func get*(ben: BeaconEngineRef, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayloadV3, - blobsBundle: var BlobsBundleV1): bool = - ben.queue.get(id, blockValue, payload, blobsBundle) - -func get*(ben: BeaconEngineRef, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayloadV1OrV2): bool = - ben.queue.get(id, blockValue, payload) + payload: var ExecutionBundle): bool = + ben.queue.get(id, payload) # ------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ - -type AssembledExecutionPayload* = object - executionPayload*: ExecutionPayload - blobsBundle*: Opt[BlobsBundleV1] - blockValue*: UInt256 - executionRequests*: Opt[array[3, seq[byte]]] - -proc generatePayload*(ben: BeaconEngineRef, +proc generateExecutionBundle*(ben: BeaconEngineRef, attrs: PayloadAttributes): - Result[AssembledExecutionPayload, string] = + Result[ExecutionBundle, string] = wrapException: let xp = ben.txPool @@ -234,12 +177,12 @@ proc generatePayload*(ben: BeaconEngineRef, if bundle.blobsBundle.isSome: template blobData: untyped = bundle.blobsBundle.get blobsBundle = Opt.some BlobsBundleV1( - commitments: blobData.commitments.mapIt it.Web3KZGCommitment, - proofs: blobData.proofs.mapIt it.Web3KZGProof, + commitments: blobData.commitments, + proofs: blobData.proofs, blobs: blobData.blobs.mapIt it.Web3Blob) - ok AssembledExecutionPayload( - executionPayload: executionPayload(bundle.blk), + ok ExecutionBundle( + payload: executionPayload(bundle.blk), blobsBundle: blobsBundle, blockValue: bundle.blockValue, executionRequests: bundle.executionRequests) @@ -252,6 +195,13 @@ func setInvalidAncestor*(ben: BeaconEngineRef, header: Header, blockHash: Hash32 # bad ancestor. If yes, it constructs the payload failure response to return. proc checkInvalidAncestor*(ben: BeaconEngineRef, check, head: Hash32): Opt[PayloadStatusV1] = + proc latestValidHash(chain: ForkedChainRef, invalid: auto): Hash32 = + let parent = chain.headerByHash(invalid.parentHash).valueOr: + return invalid.parentHash + if parent.difficulty != 0.u256: + return default(Hash32) + invalid.parentHash + # If the hash to check is unknown, return valid ben.invalidTipsets.withValue(check, invalid) do: # If the bad hash was hit too many times, evict it and try to reprocess in @@ -292,16 +242,9 @@ proc checkInvalidAncestor*(ben: BeaconEngineRef, ben.invalidTipsets[head] = invalid[] - var lastValid = invalid.parentHash - # If the last valid hash is the terminal pow block, return 0x0 for latest valid hash - var header: Header - if ben.com.db.getBlockHeader(invalid.parentHash, header): - if header.difficulty != 0.u256: - lastValid = default(Hash32) - + let lastValid = latestValidHash(ben.chain, invalid) return Opt.some invalidStatus(lastValid, "links to previously rejected block") - do: return Opt.none(PayloadStatusV1) diff --git a/nimbus/beacon/payload_conv.nim b/nimbus/beacon/payload_conv.nim index 57991fba85..a41d20761f 100644 --- a/nimbus/beacon/payload_conv.nim +++ b/nimbus/beacon/payload_conv.nim @@ -19,15 +19,15 @@ import # Private helpers # ------------------------------------------------------------------------------ -template append(w: var RlpWriter, t: TypedTransaction) = - w.appendRawBytes(distinctBase t) +template append(w: var RlpWriter, typedTransaction: TypedTransaction) = + w.appendRawBytes(distinctBase typedTransaction) -template append(w: var RlpWriter, t: WithdrawalV1) = +template append(w: var RlpWriter, withdrawalV1: WithdrawalV1) = w.append blocks.Withdrawal( - index: distinctBase(t.index), - validatorIndex: distinctBase(t.validatorIndex), - address: t.address, - amount: distinctBase(t.amount), + index: distinctBase(withdrawalV1.index), + validatorIndex: distinctBase(withdrawalV1.validatorIndex), + address: withdrawalV1.address, + amount: distinctBase(withdrawalV1.amount), ) func wdRoot(list: openArray[WithdrawalV1]): Hash32 = diff --git a/nimbus/beacon/payload_queue.nim b/nimbus/beacon/payload_queue.nim index 8013b7bc3b..e4d13332ec 100644 --- a/nimbus/beacon/payload_queue.nim +++ b/nimbus/beacon/payload_queue.nim @@ -31,12 +31,16 @@ type SimpleQueue[M: static[int]; T] = object list: array[M, QueueItem[T]] + ExecutionBundle* = object + payload*: ExecutionPayload + blockValue*: UInt256 + blobsBundle*: Opt[BlobsBundleV1] + executionRequests*: Opt[array[3, seq[byte]]] + targetBlobsPerBlock*: Opt[Quantity] + PayloadItem = object id: Bytes8 - payload: ExecutionPayload - blockValue: UInt256 - blobsBundle: Opt[BlobsBundleV1] - executionRequests: Opt[array[3, seq[byte]]] + payload: ExecutionBundle HeaderItem = object hash: Hash32 @@ -73,30 +77,8 @@ proc put*(api: var PayloadQueue, api.headerQueue.put(HeaderItem(hash: hash, header: header)) proc put*(api: var PayloadQueue, id: Bytes8, - blockValue: UInt256, payload: ExecutionPayload, - blobsBundle: Opt[BlobsBundleV1]) = - api.payloadQueue.put(PayloadItem(id: id, - payload: payload, blockValue: blockValue, blobsBundle: blobsBundle)) - -proc put*(api: var PayloadQueue, id: Bytes8, - blockValue: UInt256, payload: ExecutionPayload, - blobsBundle: Opt[BlobsBundleV1], - executionRequests: Opt[array[3, seq[byte]]]) = - api.payloadQueue.put(PayloadItem(id: id, - payload: payload, blockValue: blockValue, - blobsBundle: blobsBundle, executionRequests: executionRequests)) - -proc put*(api: var PayloadQueue, id: Bytes8, - blockValue: UInt256, payload: SomeExecutionPayload, - blobsBundle: Opt[BlobsBundleV1]) = - doAssert blobsBundle.isNone == (payload is - ExecutionPayloadV1 | ExecutionPayloadV2) - api.put(id, blockValue, payload.executionPayload, blobsBundle = blobsBundle) - -proc put*(api: var PayloadQueue, id: Bytes8, - blockValue: UInt256, - payload: ExecutionPayloadV1 | ExecutionPayloadV2) = - api.put(id, blockValue, payload, blobsBundle = Opt.none(BlobsBundleV1)) + payload: ExecutionBundle) = + api.payloadQueue.put(PayloadItem(id: id, payload: payload)) # ------------------------------------------------------------------------------ # Public functions, getters @@ -111,81 +93,9 @@ proc get*(api: PayloadQueue, hash: Hash32, false proc get*(api: PayloadQueue, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayload, - blobsBundle: var Opt[BlobsBundleV1]): bool = - for x in api.payloadQueue: - if x.id == id: - payload = x.payload - blockValue = x.blockValue - blobsBundle = x.blobsBundle - return true - false - -proc get*(api: PayloadQueue, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayload, - blobsBundle: var Opt[BlobsBundleV1], - executionRequests: var Opt[array[3, seq[byte]]]): bool = + payload: var ExecutionBundle): bool = for x in api.payloadQueue: if x.id == id: payload = x.payload - blockValue = x.blockValue - blobsBundle = x.blobsBundle - executionRequests = x.executionRequests return true false - -proc get*(api: PayloadQueue, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayloadV1): bool = - var - p: ExecutionPayload - blobsBundleOpt: Opt[BlobsBundleV1] - let found = api.get(id, blockValue, p, blobsBundleOpt) - if found: - doAssert(p.version == Version.V1) - payload = p.V1 - doAssert(blobsBundleOpt.isNone) - return found - -proc get*(api: PayloadQueue, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayloadV2): bool = - var - p: ExecutionPayload - blobsBundleOpt: Opt[BlobsBundleV1] - let found = api.get(id, blockValue, p, blobsBundleOpt) - if found: - doAssert(p.version == Version.V2) - payload = p.V2 - doAssert(blobsBundleOpt.isNone) - return found - -proc get*(api: PayloadQueue, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayloadV3, - blobsBundle: var BlobsBundleV1): bool = - var - p: ExecutionPayload - blobsBundleOpt: Opt[BlobsBundleV1] - let found = api.get(id, blockValue, p, blobsBundleOpt) - if found: - doAssert(p.version == Version.V3) - payload = p.V3 - doAssert(blobsBundleOpt.isSome) - blobsBundle = blobsBundleOpt.unsafeGet - return found - -proc get*(api: PayloadQueue, id: Bytes8, - blockValue: var UInt256, - payload: var ExecutionPayloadV1OrV2): bool = - var - p: ExecutionPayload - blobsBundleOpt: Opt[BlobsBundleV1] - let found = api.get(id, blockValue, p, blobsBundleOpt) - if found: - doAssert(p.version in {Version.V1, Version.V2}) - payload = p.V1V2 - doAssert(blobsBundleOpt.isNone) - return found diff --git a/nimbus/beacon/web3_eth_conv.nim b/nimbus/beacon/web3_eth_conv.nim index 671b626683..a4a61937b8 100644 --- a/nimbus/beacon/web3_eth_conv.nim +++ b/nimbus/beacon/web3_eth_conv.nim @@ -26,11 +26,8 @@ export type Web3Quantity* = web3types.Quantity Web3ExtraData* = web3types.DynamicBytes[0, 32] - Web3BlockNumber* = Quantity Web3Tx* = engine_api_types.TypedTransaction Web3Blob* = engine_api_types.Blob - Web3KZGProof* = engine_api_types.KZGProof - Web3KZGCommitment* = engine_api_types.KZGCommitment {.push gcsafe, raises:[].} @@ -57,36 +54,35 @@ func u64*(x: Opt[Web3Quantity]): Opt[uint64] = if x.isNone: Opt.none(uint64) else: Opt.some(uint64 x.get) -func u256*(x: Web3BlockNumber): UInt256 = +func u256*(x: Web3Quantity): UInt256 = u256(x.uint64) -func u256*(x: common.FixedBytes[32]): UInt256 = +func u256*(x: FixedBytes[32]): UInt256 = UInt256.fromBytesBE(x.data) -func ethTime*(x: Web3Quantity): common.EthTime = - common.EthTime(x) - -func ethGasInt*(x: Web3Quantity): common.GasInt = - common.GasInt x +func ethTime*(x: Web3Quantity): EthTime = + EthTime(x) func ethBlob*(x: Web3ExtraData): seq[byte] = distinctBase x -func ethWithdrawal*(x: WithdrawalV1): common.Withdrawal = - result.index = x.index.uint64 - result.validatorIndex = x.validatorIndex.uint64 - result.address = x.address - result.amount = x.amount.uint64 +func ethWithdrawal*(x: WithdrawalV1): Withdrawal = + Withdrawal( + index: x.index.uint64, + validatorIndex: x.validatorIndex.uint64, + address: x.address, + amount: x.amount.uint64, + ) func ethWithdrawals*(list: openArray[WithdrawalV1]): - seq[common.Withdrawal] = - result = newSeqOfCap[common.Withdrawal](list.len) + seq[Withdrawal] = + result = newSeqOfCap[Withdrawal](list.len) for x in list: result.add ethWithdrawal(x) func ethWithdrawals*(x: Opt[seq[WithdrawalV1]]): - Opt[seq[common.Withdrawal]] = - if x.isNone: Opt.none(seq[common.Withdrawal]) + Opt[seq[Withdrawal]] = + if x.isNone: Opt.none(seq[Withdrawal]) else: Opt.some(ethWithdrawals x.get) func ethTx*(x: Web3Tx): common.Transaction {.gcsafe, raises:[RlpError].} = @@ -98,6 +94,27 @@ func ethTxs*(list: openArray[Web3Tx]): for x in list: result.add ethTx(x) +func ethAuth*(x: AuthorizationObject): Authorization = + Authorization( + chainId: ChainId x.chainId, + address: x.address, + nonce: distinctBase x.nonce, + v: distinctBase x.v, + r: x.r, + s: x.s, + ) + +func ethAuthList*(list: openArray[AuthorizationObject]): + seq[Authorization] = + result = newSeqOfCap[Authorization](list.len) + for x in list: + result.add ethAuth(x) + +func ethAuthList*(x: Opt[seq[AuthorizationObject]]): + Opt[seq[Authorization]] = + if x.isNone: Opt.none(seq[Authorization]) + else: Opt.some(ethAuthList x.get) + # ------------------------------------------------------------------------------ # Eth types to Web3 types # ------------------------------------------------------------------------------ @@ -105,10 +122,10 @@ func ethTxs*(list: openArray[Web3Tx]): func w3Qty*(x: UInt256): Web3Quantity = Web3Quantity x.truncate(uint64) -func w3Qty*(x: common.EthTime): Web3Quantity = +func w3Qty*(x: EthTime): Web3Quantity = Web3Quantity x.uint64 -func w3Qty*(x: common.EthTime, y: int): Web3Quantity = +func w3Qty*(x: EthTime, y: int): Web3Quantity = Web3Quantity(x + y.EthTime) func w3Qty*(x: Web3Quantity, y: int): Web3Quantity = @@ -130,16 +147,6 @@ func w3Qty*(x: uint64): Web3Quantity = func w3Qty*(x: int64): Web3Quantity = Web3Quantity(x) -func w3BlockNumber*(x: Opt[uint64]): Opt[Web3BlockNumber] = - if x.isNone: Opt.none(Web3BlockNumber) - else: Opt.some(Web3BlockNumber x.get) - -func w3BlockNumber*(x: uint64): Web3BlockNumber = - Web3BlockNumber(x) - -func w3BlockNumber*(x: UInt256): Web3BlockNumber = - Web3BlockNumber x.truncate(uint64) - func w3ExtraData*(x: seq[byte]): Web3ExtraData = Web3ExtraData x diff --git a/nimbus/common/chain_config.nim b/nimbus/common/chain_config.nim index d618b73c4f..686f18168d 100644 --- a/nimbus/common/chain_config.nim +++ b/nimbus/common/chain_config.nim @@ -445,7 +445,9 @@ func chainConfigForNetwork*(id: NetworkId): ChainConfig = result = case id of MainNet: - const mainNetTTD = parse("58750000000000000000000",UInt256) + const + mainNetTTD = parse("58750000000000000000000",UInt256) + MAINNET_DEPOSIT_CONTRACT_ADDRESS = address"0x00000000219ab540356cbb839cbe05303d7705fa" ChainConfig( chainId: MainNet.ChainId, # Genesis (Frontier): # 2015-07-30 15:26:13 UTC @@ -470,6 +472,7 @@ func chainConfigForNetwork*(id: NetworkId): ChainConfig = terminalTotalDifficulty: Opt.some(mainNetTTD), shanghaiTime: Opt.some(1_681_338_455.EthTime), # 2023-04-12 10:27:35 UTC cancunTime: Opt.some(1_710_338_135.EthTime), # 2024-03-13 13:55:35 UTC + depositContractAddress: Opt.some(MAINNET_DEPOSIT_CONTRACT_ADDRESS), ) of SepoliaNet: const sepoliaTTD = parse("17000000000000000",UInt256) @@ -540,7 +543,7 @@ func genesisBlockForNetwork*(id: NetworkId): Genesis difficulty: 0x01.u256, gasLimit: 0x17D7840, nonce: uint64(0x1234).to(Bytes8), - timestamp: EthTime(1_695_902_100), + timestamp: EthTime(0x65156994), alloc: decodePrealloc(holeskyAllocData) ) else: diff --git a/nimbus/common/chain_config_hash.nim b/nimbus/common/chain_config_hash.nim new file mode 100644 index 0000000000..9bf2e1661d --- /dev/null +++ b/nimbus/common/chain_config_hash.nim @@ -0,0 +1,84 @@ +# Nimbus +# Copyright (c) 2021-2024 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) +# * MIT license ([LICENSE-MIT](LICENSE-MIT)) +# at your option. +# This file may not be copied, modified, or distributed except according to +# those terms. + +{.push raises: [].} + +import + std/[typetraits, tables], + eth/common/base, + eth/common/times, + eth/common/hashes, + eth/common/addresses, + stew/endians2, + stint, + nimcrypto/sha2, + ./chain_config + +# ------------------------------------------------------------------------------ +# When the client doing initialization step, it will go through +# complicated steps before the genesis hash is ready. See `CommonRef.init`. +# If the genesis happen to exists in database belonging to other network, +# it will replace the one in CommonRef cache. +# That is the reason why using genesis header or genesis hash + ChainId is +# not a good solution to prevent loading existing data directory for +# the wrong network. +# But the ChainConfig + raw Genesis hash will make the job done before +# CommonRef creation. +# ------------------------------------------------------------------------------ + +# ------------------------------------------------------------------------------ +# Private helper functions +# ------------------------------------------------------------------------------ + +func update(ctx: var sha256, val: uint64 | UInt256) = + ctx.update(val.toBytesLE) + +func update(ctx: var sha256, val: ChainId | EthTime | NetworkId) = + ctx.update(distinctBase val) + +func update(ctx: var sha256, val: bool) = + ctx.update([val.byte]) + +func update(ctx: var sha256, val: Hash32 | Bytes8 | Bytes32 | Address) = + ctx.update(val.data) + +func update[T](ctx: var sha256, val: Opt[T]) = + if val.isSome: + ctx.update(val.get) + +func update[K, V](ctx: var sha256, val: Table[K, V]) = + mixin update + for k, v in val: + ctx.update(k) + ctx.update(v) + +func update[T: object](ctx: var sha256, val: T) = + for f in fields(val): + ctx.update(f) + +func update[T: ref](ctx: var sha256, val: T) = + for f in fields(val[]): + ctx.update(f) + +# ------------------------------------------------------------------------------ +# Public functions +# ------------------------------------------------------------------------------ + +func calcHash*(networkId: NetworkId, conf: ChainConfig, genesis: Genesis): Hash32 = + var ctx: sha256 + ctx.init() + ctx.update(networkId) + ctx.update(conf) + if genesis.isNil.not: + ctx.update(genesis) + ctx.finish(result.data) + ctx.clear() + +func calcHash*(networkId: NetworkId, params: NetworkParams): Hash32 = + calcHash(networkId, params.config, params.genesis) diff --git a/nimbus/common/common.nim b/nimbus/common/common.nim index 04b698eb86..3114620c21 100644 --- a/nimbus/common/common.nim +++ b/nimbus/common/common.nim @@ -15,7 +15,7 @@ import ../core/casper, ../db/[core_db, ledger, storage_types], ../utils/[utils, ec_recover], - ".."/[constants, errors], + ".."/[constants, errors, version], "."/[chain_config, evmforks, genesis, hardforks] export @@ -95,6 +95,9 @@ type pruneHistory: bool ## Must not not set for a full node, might go away some time + extraData: string + ## Value of extraData field when building block + # ------------------------------------------------------------------------------ # Forward declarations # ------------------------------------------------------------------------------ @@ -125,39 +128,36 @@ proc initializeDb(com: CommonRef) = proc contains(kvt: CoreDbKvtRef; key: openArray[byte]): bool = kvt.hasKeyRc(key).expect "valid bool" if canonicalHeadHashKey().toOpenArray notin kvt: - info "Writing genesis to DB" + info "Writing genesis to DB", + blockHash = com.genesisHeader.rlpHash, + stateRoot = com.genesisHeader.stateRoot, + difficulty = com.genesisHeader.difficulty, + gasLimit = com.genesisHeader.gasLimit, + timestamp = com.genesisHeader.timestamp, + nonce = com.genesisHeader.nonce doAssert(com.genesisHeader.number == 0.BlockNumber, "can't commit genesis block with number > 0") - doAssert(com.db.persistHeader(com.genesisHeader, + com.db.persistHeader(com.genesisHeader, com.proofOfStake(com.genesisHeader), - startOfHistory=com.genesisHeader.parentHash), - "can persist genesis header") + startOfHistory=com.genesisHeader.parentHash). + expect("can persist genesis header") doAssert(canonicalHeadHashKey().toOpenArray in kvt) # The database must at least contain the base and head pointers - the base # is implicitly considered finalized let baseNum = com.db.getSavedStateBlockNumber() - base = - try: - com.db.getBlockHeader(baseNum) - except BlockNotFound as exc: - fatal "Cannot load base block header", - baseNum, err = exc.msg - quit 1 - finalized = - try: - com.db.finalizedHeader() - except BlockNotFound: - debug "No finalized block stored in database, reverting to base" - base - head = - try: - com.db.getCanonicalHead() - except EVMError as exc: - fatal "Cannot load canonical block header", - err = exc.msg - quit 1 + base = com.db.getBlockHeader(baseNum).valueOr: + fatal "Cannot load base block header", + baseNum, err = error + quit 1 + finalized = com.db.finalizedHeader().valueOr: + debug "No finalized block stored in database, reverting to base" + base + head = com.db.getCanonicalHead().valueOr: + fatal "Cannot load canonical block header", + err = error + quit 1 info "Database initialized", base = (base.blockHash, base.number), @@ -169,8 +169,7 @@ proc init(com : CommonRef, networkId : NetworkId, config : ChainConfig, genesis : Genesis, - pruneHistory: bool, - ) = + pruneHistory: bool) = config.daoCheck() @@ -181,7 +180,8 @@ proc init(com : CommonRef, com.syncProgress= SyncProgress() com.syncState = Waiting com.pruneHistory= pruneHistory - com.pos = CasperRef.new + com.pos = CasperRef.new + com.extraData = ShortClientId # com.forkIdCalculator and com.genesisHash are set # by setForkId @@ -195,8 +195,8 @@ proc init(com : CommonRef, fork = toHardFork(com.forkTransitionTable, forkDeterminer) # Must not overwrite the global state on the single state DB - if not db.getBlockHeader(0.BlockNumber, com.genesisHeader): - com.genesisHeader = toGenesisHeader(genesis, fork, com.db) + com.genesisHeader = db.getBlockHeader(0.BlockNumber).valueOr: + toGenesisHeader(genesis, fork, com.db) com.setForkId(com.genesisHeader) com.pos.timestamp = genesis.timestamp @@ -328,6 +328,9 @@ proc proofOfStake*(com: CommonRef, header: Header): bool = # This costly check is only executed from test suite com.isBlockAfterTtd(header) +func depositContractAddress*(com: CommonRef): Address = + com.config.depositContractAddress.get(default(Address)) + proc syncReqNewHead*(com: CommonRef; header: Header) {.gcsafe, raises: [].} = ## Used by RPC updater @@ -414,6 +417,9 @@ func syncHighest*(com: CommonRef): BlockNumber = func syncState*(com: CommonRef): SyncState = com.syncState +func extraData*(com: CommonRef): string = + com.extraData + # ------------------------------------------------------------------------------ # Setters # ------------------------------------------------------------------------------ @@ -452,6 +458,9 @@ func `notifyBadBlock=`*(com: CommonRef; cb: NotifyBadBlockCB) = ## Activate or reset a call back handler for bad block notification. com.notifyBadBlock = cb +func `extraData=`*(com: CommonRef, val: string) = + com.extraData = val + # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/common/hardforks.nim b/nimbus/common/hardforks.nim index 37c3d4ae0c..7ed02deb1a 100644 --- a/nimbus/common/hardforks.nim +++ b/nimbus/common/hardforks.nim @@ -176,6 +176,7 @@ type terminalTotalDifficulty*: Opt[UInt256] terminalTotalDifficultyPassed*: Opt[bool] + depositContractAddress*: Opt[Address] # These are used for checking that the values of the fields # are in a valid order. diff --git a/nimbus/compile_info.nim b/nimbus/compile_info.nim index b382bbfffb..8e48b54631 100644 --- a/nimbus/compile_info.nim +++ b/nimbus/compile_info.nim @@ -9,8 +9,7 @@ # according to those terms. import - ./db/core_db/base/base_config, - ./db/ledger/base/base_config + ./db/core_db/base/base_config func vmName(): string = when defined(evmc_enabled): @@ -29,8 +28,6 @@ const rc &= ", boehm/gc" when 0 < coreDbBaseConfigExtras.len: rc &= ", " & coreDbBaseConfigExtras - when 0 < ledgerBaseConfigExtras.len: - rc &= ", " & ledgerBaseConfigExtras rc &= " enabled" rc diff --git a/nimbus/config.nim b/nimbus/config.nim index 6ab9835a1e..f3fbd9768c 100644 --- a/nimbus/config.nim +++ b/nimbus/config.nim @@ -12,7 +12,6 @@ import std/[ options, strutils, - times, os, uri, net @@ -31,35 +30,24 @@ import export net, defs -const - # TODO: fix this agent-string format to match other - # eth clients format - NimbusIdent* = "$# v$# [$#: $#, $#, $#]" % [ - NimbusName, - NimbusVersion, - hostOS, - hostCPU, - VmName, - GitRevision - ] -let +const # e.g.: Copyright (c) 2018-2021 Status Research & Development GmbH NimbusCopyright* = "Copyright (c) 2018-" & - $(now().utc.year) & + CompileDate.split('-')[0] & " Status Research & Development GmbH" # e.g.: - # Nimbus v0.1.0 [windows: amd64, rocksdb, evmc, dda8914f] + # nimbus/v0.1.0-abcdef/os-cpu/nim-a.b.c/emvc # Copyright (c) 2018-2021 Status Research & Development GmbH NimbusBuild* = "$#\p$#" % [ - NimbusIdent, + ClientId, NimbusCopyright, ] - NimbusHeader* = "$#\p\p$#" % [ + NimbusHeader* = "$#\p\pNim version $#" % [ NimbusBuild, - version.NimVersion + NimVersion ] func defaultDataDir*(): string = @@ -94,7 +82,7 @@ const let defaultListenAddress = getAutoAddress(Port(0)).toIpAddress() - defaultListenAddressDesc = $defaultListenAddress & ", meaning all network interfaces" + defaultListenAddressDesc = $defaultListenAddress & ", meaning all network interfaces" # `when` around an option doesn't work with confutils; it fails to compile. # Workaround that by setting the `ignore` pragma on EVMC-specific options. @@ -117,11 +105,6 @@ type noCommand `import` - ProtocolFlag* {.pure.} = enum - ## Protocol flags - Eth ## enable eth subprotocol - #Snap ## enable snap sub-protocol - RpcFlag* {.pure.} = enum ## RPC flags Eth ## enable eth_ set of RPC API @@ -194,6 +177,12 @@ type defaultValueDesc: "Baked in trusted setup" name: "trusted-setup-file" .}: Option[string] + extraData* {. + desc: "Value of extraData field when assemble a block(max 32 bytes)" + defaultValue: ShortClientId + defaultValueDesc: $ShortClientId + name: "extra-data" .}: string + network {. separator: "\pETHEREUM NETWORK OPTIONS:" desc: "Name or id number of Ethereum network(mainnet(1), sepolia(11155111), holesky(17000), other=custom)" @@ -362,18 +351,10 @@ type agentString* {. desc: "Node agent string which is used as identifier in network" - defaultValue: NimbusIdent - defaultValueDesc: $NimbusIdent + defaultValue: ClientId + defaultValueDesc: $ClientId name: "agent-string" .}: string - protocols {. - desc: "Enable specific set of server protocols (available: Eth, " & - " None.) This will not affect the sync mode" - # " Snap, None.) This will not affect the sync mode" - defaultValue: @[] - defaultValueDesc: $ProtocolFlag.Eth - name: "protocols" .}: seq[string] - beaconChunkSize* {. hidden desc: "Number of blocks per database transaction for beacon sync" @@ -671,23 +652,6 @@ proc getNetworkId(conf: NimbusConf): Option[NetworkId] = error "Failed to parse network name or id", network quit QuitFailure -proc getProtocolFlags*(conf: NimbusConf): set[ProtocolFlag] = - if conf.protocols.len == 0: - return {ProtocolFlag.Eth} - - var noneOk = false - for item in repeatingList(conf.protocols): - case item.toLowerAscii() - of "eth": result.incl ProtocolFlag.Eth - # of "snap": result.incl ProtocolFlag.Snap - of "none": noneOk = true - else: - error "Unknown protocol", name=item - quit QuitFailure - if noneOk and 0 < result.len: - error "Setting none contradicts wire protocols", names = $result - quit QuitFailure - proc getRpcFlags(api: openArray[string]): set[RpcFlag] = if api.len == 0: return {RpcFlag.Eth} @@ -755,7 +719,7 @@ proc getBootNodes*(conf: NimbusConf): seq[ENode] = # Bootstrap nodes provided as ENRs for enr in conf.bootstrapEnrs: - let enode = Enode.fromEnr(enr).valueOr: + let enode = ENode.fromEnr(enr).valueOr: fatal "Invalid bootstrap ENR provided", error quit 1 @@ -770,7 +734,7 @@ proc getStaticPeers*(conf: NimbusConf): seq[ENode] = # Static peers provided as ENRs for enr in conf.staticPeersEnrs: - let enode = Enode.fromEnr(enr).valueOr: + let enode = ENode.fromEnr(enr).valueOr: fatal "Invalid static peer ENR provided", error quit 1 diff --git a/nimbus/constants.nim b/nimbus/constants.nim index 4c28e9ec63..1006cfc292 100644 --- a/nimbus/constants.nim +++ b/nimbus/constants.nim @@ -108,7 +108,6 @@ const initAddress(3) HISTORY_STORAGE_ADDRESS* = address"0x0aae40965e6800cd9b1f4b05ff21581047e3f91e" - DEPOSIT_CONTRACT_ADDRESS* = address"0x00000000219ab540356cbb839cbe05303d7705fa" - WITHDRAWAL_REQUEST_ADDRESS* = address"0x00A3ca265EBcb825B45F985A16CEFB49958cE017" - CONSOLIDATION_REQUEST_ADDRESS* = address"0x00b42dbF2194e931E80326D950320f7d9Dbeac02" + WITHDRAWAL_QUEUE_ADDRESS* = address"0x09Fc772D0857550724b07B850a4323f39112aAaA" + CONSOLIDATION_QUEUE_ADDRESS* = address"0x01aBEa29659e5e97C95107F20bb753cD3e09bBBb" # End diff --git a/nimbus/core/block_import.nim b/nimbus/core/block_import.nim index 4f80307feb..54e98e3f82 100644 --- a/nimbus/core/block_import.nim +++ b/nimbus/core/block_import.nim @@ -49,9 +49,8 @@ proc importRlpBlocks*(importFile: string, importRlpBlocks(bytes, chain, finalize) proc importRlpBlocks*(conf: NimbusConf, com: CommonRef) = - var head: Header - if not com.db.getCanonicalHead(head): - error "cannot get canonical head from db" + let head = com.db.getCanonicalHead().valueOr: + error "cannot get canonical head from db", msg=error quit(QuitFailure) let chain = newForkedChain(com, head, baseDistance = 0) diff --git a/nimbus/core/chain/chain_desc.nim b/nimbus/core/chain/chain_desc.nim index 3df56ee904..388b8454e6 100644 --- a/nimbus/core/chain/chain_desc.nim +++ b/nimbus/core/chain/chain_desc.nim @@ -55,15 +55,12 @@ func newChain*(com: CommonRef, proc newChain*(com: CommonRef): ChainRef = ## Constructor for the `Chain` descriptor object. All sub-object descriptors ## are initialised with defaults. So is extra block chain validation - try: - let header = com.db.getCanonicalHead() - let extraValidation = com.proofOfStake(header) - return ChainRef( - com: com, - extraValidation: extraValidation, - ) - except CatchableError: - doAssert(false, "no canonical head") + let header = com.db.getCanonicalHead().expect("canonical head exists") + let extraValidation = com.proofOfStake(header) + return ChainRef( + com: com, + extraValidation: extraValidation, + ) # ------------------------------------------------------------------------------ # Public `Chain` getters @@ -88,13 +85,6 @@ func verifyFrom*(c: ChainRef): BlockNumber = ## Getter c.verifyFrom -proc currentBlock*(c: ChainRef): Header - {.gcsafe, raises: [CatchableError].} = - ## currentBlock retrieves the current head block of the canonical chain. - ## Ideally the block should be retrieved from the blockchain's internal cache. - ## but now it's enough to retrieve it from database - c.db.getCanonicalHead() - # ------------------------------------------------------------------------------ # Public `Chain` setters # ------------------------------------------------------------------------------ diff --git a/nimbus/core/chain/forked_chain.nim b/nimbus/core/chain/forked_chain.nim index 19fa57585c..997d3fbb7d 100644 --- a/nimbus/core/chain/forked_chain.nim +++ b/nimbus/core/chain/forked_chain.nim @@ -89,11 +89,10 @@ proc processBlock(c: ForkedChainRef, # We still need to write header to database # because validateUncles still need it let blockHash = header.blockHash() - if not c.db.persistHeader( - blockHash, - header, - c.com.startOfHistory): - return err("Could not persist header") + ?c.db.persistHeader( + blockHash, + header, + c.com.startOfHistory) # update currentBlock *after* we persist it # so the rpc return consistent result @@ -164,7 +163,7 @@ proc validateBlock(c: ForkedChainRef, ok() -proc replaySegment(c: ForkedChainRef, target: Hash32) = +proc replaySegment*(c: ForkedChainRef, target: Hash32) = # Replay from base+1 to target block var prevHash = target @@ -423,14 +422,8 @@ proc init*( ## let base = com.db.getSavedStateBlockNumber - var - baseHash: Hash32 - baseHeader: Header - try: - baseHash = com.db.getBlockHash(base) - baseHeader = com.db.getBlockHeader(baseHash) - except BlockNotFound: - raiseAssert "Base header missing for #" & $base + baseHash = com.db.getBlockHash(base).expect("baseHash exists") + baseHeader = com.db.getBlockHeader(baseHash).expect("base header exists") # update global syncStart com.syncStart = baseHeader.number @@ -453,8 +446,7 @@ proc newForkedChain*(com: CommonRef, ## for some particular test or other applications. Otherwise consider ## `init()`. let baseHash = baseHeader.blockHash - - var chain = ForkedChainRef( + let chain = ForkedChainRef( com: com, db : com.db, baseHeader : baseHeader, @@ -635,24 +627,29 @@ func baseHash*(c: ForkedChainRef): Hash32 = func txRecords*(c: ForkedChainRef, txHash: Hash32): (Hash32, uint64) = c.txRecords.getOrDefault(txHash, (Hash32.default, 0'u64)) +func isInMemory*(c: ForkedChainRef, blockHash: Hash32): bool = + c.blocks.hasKey(blockHash) + func memoryBlock*(c: ForkedChainRef, blockHash: Hash32): BlockDesc = c.blocks.getOrDefault(blockHash) +func memoryTransaction*(c: ForkedChainRef, txHash: Hash32): Opt[Transaction] = + let (blockHash, index) = c.txRecords.getOrDefault(txHash, (Hash32.default, 0'u64)) + c.blocks.withValue(blockHash, val) do: + return Opt.some(val.blk.transactions[index]) + return Opt.none(Transaction) + proc latestBlock*(c: ForkedChainRef): Block = c.blocks.withValue(c.cursorHash, val) do: return val.blk do: # This can happen if block pointed by cursorHash is not loaded yet - try: - result = c.db.getEthBlock(c.cursorHash) - c.blocks[c.cursorHash] = BlockDesc( - blk: result, - receipts: c.db.getReceipts(result.header.receiptsRoot), - ) - except BlockNotFound: - doAssert(false, "Block should exists in database") - except RlpError: - doAssert(false, "Receipts should exists in database") + result = c.db.getEthBlock(c.cursorHash).expect("cursorBlock exists") + c.blocks[c.cursorHash] = BlockDesc( + blk: result, + receipts: c.db.getReceipts(result.header.receiptsRoot). + expect("receipts exists"), + ) proc headerByNumber*(c: ForkedChainRef, number: BlockNumber): Result[Header, string] = if number > c.cursorHeader.number: @@ -665,11 +662,7 @@ proc headerByNumber*(c: ForkedChainRef, number: BlockNumber): Result[Header, str return ok(c.baseHeader) if number < c.baseHeader.number: - var header: Header - if c.db.getBlockHeader(number, header): - return ok(header) - else: - return err("Failed to get header with number: " & $number) + return c.db.getBlockHeader(number) shouldNotKeyError: var prevHash = c.cursorHeader.parentHash @@ -687,38 +680,23 @@ proc headerByHash*(c: ForkedChainRef, blockHash: Hash32): Result[Header, string] do: if c.baseHash == blockHash: return ok(c.baseHeader) - var header: Header - if c.db.getBlockHeader(blockHash, header): - return ok(header) - return err("Failed to get header with hash: " & $blockHash) + return c.db.getBlockHeader(blockHash) -proc blockByHash*(c: ForkedChainRef, blockHash: Hash32): Opt[Block] = +proc blockByHash*(c: ForkedChainRef, blockHash: Hash32): Result[Block, string] = # used by getPayloadBodiesByHash # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/shanghai.md#specification-3 # 4. Client software MAY NOT respond to requests for finalized blocks by hash. c.blocks.withValue(blockHash, val) do: - return Opt.some(val.blk) + return ok(val.blk) do: - var - header: Header - body: BlockBody - if c.db.getBlockHeader(blockHash, header) and c.db.getBlockBody(blockHash, body): - return ok(Block.init(move(header), move(body))) - else: - return Opt.none(Block) + return c.db.getEthBlock(blockHash) proc blockByNumber*(c: ForkedChainRef, number: BlockNumber): Result[Block, string] = if number > c.cursorHeader.number: return err("Requested block number not exists: " & $number) if number < c.baseHeader.number: - var - header: Header - body: BlockBody - if c.db.getBlockHeader(number, header) and c.db.getBlockBody(header, body): - return ok(Block.init(move(header), move(body))) - else: - return err("Failed to get block with number: " & $number) + return c.db.getEthBlock(number) shouldNotKeyError: var prevHash = c.cursorHash @@ -740,6 +718,9 @@ func blockFromBaseTo*(c: ForkedChainRef, number: BlockNumber): seq[Block] = prevHash = item.blk.header.parentHash func isCanonical*(c: ForkedChainRef, blockHash: Hash32): bool = + if blockHash == c.baseHash: + return true + shouldNotKeyError: var prevHash = c.cursorHash while prevHash != c.baseHash: @@ -770,5 +751,6 @@ proc isCanonicalAncestor*(c: ForkedChainRef, # canonical chain in database should have a marker # and the marker is block number - var canonHash: common.Hash32 - c.db.getBlockHash(blockNumber, canonHash) and canonHash == blockHash + let canonHash = c.db.getBlockHash(blockNumber).valueOr: + return false + canonHash == blockHash diff --git a/nimbus/core/chain/persist_blocks.nim b/nimbus/core/chain/persist_blocks.nim index c2d9214916..4d69511d66 100644 --- a/nimbus/core/chain/persist_blocks.nim +++ b/nimbus/core/chain/persist_blocks.nim @@ -12,7 +12,6 @@ import results, - ../../db/ledger, ../../evm/state, ../../evm/types, ../executor, @@ -157,10 +156,9 @@ proc persistBlocksImpl( let blockHash = header.blockHash() if NoPersistHeader notin flags: - if not c.db.persistHeader( - blockHash, header, c.com.proofOfStake(header), c.com.startOfHistory - ): - return err("Could not persist header") + ?c.db.persistHeader( + blockHash, header, + c.com.proofOfStake(header), c.com.startOfHistory) if NoPersistTransactions notin flags: c.db.persistTransactions(header.number, header.txRoot, blk.transactions) @@ -200,46 +198,6 @@ proc persistBlocksImpl( ok((blks, txs, gas)) -# ------------------------------------------------------------------------------ -# Public `ChainDB` methods -# ------------------------------------------------------------------------------ - -proc insertBlockWithoutSetHead*(c: ChainRef, blk: Block): Result[void, string] = - discard ?c.persistBlocksImpl([blk], {NoPersistHeader, NoPersistReceipts}) - - if not c.db.persistHeader(blk.header.blockHash, blk.header, c.com.startOfHistory): - return err("Could not persist header") - - ok() - -proc setCanonical*(c: ChainRef, header: Header): Result[void, string] = - if header.parentHash == default(Hash32): - if not c.db.setHead(header): - return err("setHead failed") - return ok() - - var body: BlockBody - if not c.db.getBlockBody(header, body): - debug "Failed to get BlockBody", hash = header.blockHash - return err("Could not get block body") - - discard - ?c.persistBlocksImpl( - [Block.init(header, move(body))], {NoPersistHeader, NoPersistTransactions} - ) - - if not c.db.setHead(header): - return err("setHead failed") - ok() - -proc setCanonical*(c: ChainRef, blockHash: Hash32): Result[void, string] = - var header: Header - if not c.db.getBlockHeader(blockHash, header): - debug "Failed to get Header", hash = blockHash - return err("Could not get block header") - - setCanonical(c, header) - proc persistBlocks*( c: ChainRef, blocks: openArray[Block], flags: PersistBlockFlags = {} ): Result[PersistStats, string] = diff --git a/nimbus/core/eip6110.nim b/nimbus/core/eip6110.nim index 63f27825ed..583ea9e4fd 100644 --- a/nimbus/core/eip6110.nim +++ b/nimbus/core/eip6110.nim @@ -14,8 +14,7 @@ import eth/common/receipts, stew/assign2, stew/arrayops, - results, - ../constants + results # ----------------------------------------------------------------------------- # Private helpers @@ -71,14 +70,13 @@ func depositLogToRequest(data: openArray[byte]): DepositRequest = # Public functions # ----------------------------------------------------------------------------- -func parseDepositLogs*(logs: openArray[Log]): Result[seq[byte], string] = - var res = newSeq[byte](logs.len*depositRequestSize) +func parseDepositLogs*(logs: openArray[Log], depositContractAddress: Address): Result[seq[byte], string] = + var res = newSeqOfCap[byte](logs.len*depositRequestSize) for i, log in logs: - if log.address == DEPOSIT_CONTRACT_ADDRESS: - if log.data.len != 576: - return err("deposit wrong length: want 576, have " & $log.data.len) - let offset = i*depositRequestSize - assign(res.toOpenArray(offset, offset+depositRequestSize-1), - depositLogToRequest(log.data)) + if log.address != depositContractAddress: + continue + if log.data.len != 576: + return err("deposit wrong length: want 576, have " & $log.data.len) + res.add depositLogToRequest(log.data) ok(move(res)) diff --git a/nimbus/core/executor/process_block.nim b/nimbus/core/executor/process_block.nim index 77c9c5d8b8..c4bb9a626b 100644 --- a/nimbus/core/executor/process_block.nim +++ b/nimbus/core/executor/process_block.nim @@ -170,8 +170,9 @@ proc procBlkEpilogue( blockNumber = header.number, expected = header.stateRoot, actual = stateRoot, - arrivedFrom = vmState.com.db.getCanonicalHead().stateRoot - return err("stateRoot mismatch") + arrivedFrom = vmState.parent.stateRoot + return err("stateRoot mismatch, expect: " & + $header.stateRoot & ", got: " & $stateRoot) if not skipReceipts: let bloom = createBloom(vmState.receipts) @@ -190,8 +191,8 @@ proc procBlkEpilogue( if header.requestsHash.isSome: let - depositReqs = ?parseDepositLogs(vmState.allLogs) - requestsHash = calcRequestsHashInsertType(depositReqs, withdrawalReqs, consolidationReqs) + depositReqs = ?parseDepositLogs(vmState.allLogs, vmState.com.depositContractAddress) + requestsHash = calcRequestsHash(depositReqs, withdrawalReqs, consolidationReqs) if header.requestsHash.get != requestsHash: debug "wrong requestsHash in block", diff --git a/nimbus/core/executor/process_transaction.nim b/nimbus/core/executor/process_transaction.nim index 0e7f9247b0..0411587290 100644 --- a/nimbus/core/executor/process_transaction.nim +++ b/nimbus/core/executor/process_transaction.nim @@ -197,7 +197,7 @@ proc processDequeueWithdrawalRequests*(vmState: BaseVMState): seq[byte] = sender : SYSTEM_ADDRESS, gasLimit : 30_000_000.GasInt, gasPrice : 0.GasInt, - to : WITHDRAWAL_REQUEST_ADDRESS, + to : WITHDRAWAL_QUEUE_ADDRESS, # It's a systemCall, no need for other knicks knacks sysCall : true, @@ -221,7 +221,7 @@ proc processDequeueConsolidationRequests*(vmState: BaseVMState): seq[byte] = sender : SYSTEM_ADDRESS, gasLimit : 30_000_000.GasInt, gasPrice : 0.GasInt, - to : CONSOLIDATION_REQUEST_ADDRESS, + to : CONSOLIDATION_QUEUE_ADDRESS, # It's a systemCall, no need for other knicks knacks sysCall : true, diff --git a/nimbus/core/tx_pool.nim b/nimbus/core/tx_pool.nim index 664b2794f8..ad87958bba 100644 --- a/nimbus/core/tx_pool.nim +++ b/nimbus/core/tx_pool.nim @@ -390,7 +390,7 @@ proc setHead(xp: TxPoolRef; val: Header) # ------------------------------------------------------------------------------ proc new*(T: type TxPoolRef; com: CommonRef): T - {.gcsafe,raises: [CatchableError].} = + {.gcsafe,raises: [].} = ## Constructor, returns a new tx-pool descriptor. new result result.init(com) @@ -482,7 +482,7 @@ proc assembleBlock*( ## Note that this getter runs *ad hoc* all the txs through the VM in ## order to build the block. - let pst = xp.packerVmExec().valueOr: # updates vmState + var pst = xp.packerVmExec().valueOr: # updates vmState return err(error) var blk = EthBlock( diff --git a/nimbus/core/tx_pool/tx_desc.nim b/nimbus/core/tx_pool/tx_desc.nim index 9ec323e167..5e981b5174 100644 --- a/nimbus/core/tx_pool/tx_desc.nim +++ b/nimbus/core/tx_pool/tx_desc.nim @@ -137,12 +137,12 @@ proc update(xp: TxPoolRef; parent: Header) = # Public functions, constructor # ------------------------------------------------------------------------------ -proc init*(xp: TxPoolRef; com: CommonRef) - {.gcsafe,raises: [CatchableError].} = +proc init*(xp: TxPoolRef; com: CommonRef) = ## Constructor, returns new tx-pool descriptor. xp.startDate = getTime().utc.toTime - xp.vmState = setupVMState(com, com.db.getCanonicalHead) + let head = com.db.getCanonicalHead.expect("Canonicalhead exists") + xp.vmState = setupVMState(com, head) xp.txDB = TxTabsRef.new xp.lifeTime = txItemLifeTime diff --git a/nimbus/core/tx_pool/tx_packer.nim b/nimbus/core/tx_pool/tx_packer.nim index d2a298c51c..eacd57b102 100644 --- a/nimbus/core/tx_pool/tx_packer.nim +++ b/nimbus/core/tx_pool/tx_packer.nim @@ -16,6 +16,7 @@ import stew/sorted_set, + stew/byteutils, ../../db/ledger, ../../common/common, ../../utils/utils, @@ -268,7 +269,7 @@ proc vmExecCommit(pst: var TxPacker): Result[void, string] = if vmState.fork >= FkPrague: pst.withdrawalReqs = processDequeueWithdrawalRequests(vmState) pst.consolidationReqs = processDequeueConsolidationRequests(vmState) - pst.depositReqs = ?parseDepositLogs(vmState.allLogs) + pst.depositReqs = ?parseDepositLogs(vmState.allLogs, vmState.com.depositContractAddress) # Finish up, then vmState.stateDB.stateRoot may be accessed stateDB.persist(clearEmptyAccount = vmState.fork >= FkSpurious) @@ -312,6 +313,12 @@ proc packerVmExec*(xp: TxPoolRef): Result[TxPacker, string] ok(pst) # Block chain will roll back automatically +func getExtraData(com: CommonRef): seq[byte] = + if com.extraData.len > 32: + com.extraData.toBytes[0..<32] + else: + com.extraData.toBytes + proc assembleHeader*(pst: TxPacker): Header = ## Generate a new header, a child of the cached `head` let @@ -331,7 +338,7 @@ proc assembleHeader*(pst: TxPacker): Header = gasLimit: vmState.blockCtx.gasLimit, gasUsed: vmState.cumulativeGasUsed, timestamp: pos.timestamp, - extraData: @[], + extraData: getExtraData(com), mixHash: pos.prevRandao, nonce: default(Bytes8), baseFeePerGas: vmState.blockCtx.baseFeePerGas, @@ -346,23 +353,17 @@ proc assembleHeader*(pst: TxPacker): Header = result.excessBlobGas = Opt.some vmState.blockCtx.excessBlobGas if com.isPragueOrLater(pos.timestamp): - let requestsHash = calcRequestsHashInsertType(pst.depositReqs, + let requestsHash = calcRequestsHash(pst.depositReqs, pst.withdrawalReqs, pst.consolidationReqs) result.requestsHash = Opt.some(requestsHash) func blockValue*(pst: TxPacker): UInt256 = pst.blockValue -func executionRequests*(pst: TxPacker): array[3, seq[byte]] = - result[0] = newSeqOfCap[byte](pst.depositReqs.len+1) - result[0].add 0x00.byte - result[0].add pst.depositReqs - result[1] = newSeqOfCap[byte](pst.withdrawalReqs.len+1) - result[1].add 0x01.byte - result[1].add pst.withdrawalReqs - result[2] = newSeqOfCap[byte](pst.consolidationReqs.len+1) - result[2].add 0x02.byte - result[2].add pst.consolidationReqs +func executionRequests*(pst: var TxPacker): array[3, seq[byte]] = + result[0] = move(pst.depositReqs) + result[1] = move(pst.withdrawalReqs) + result[2] = move(pst.consolidationReqs) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/core/tx_pool/tx_tasks/tx_dispose.nim b/nimbus/core/tx_pool/tx_tasks/tx_dispose.nim index 997a9a1a12..3c8295cfb8 100644 --- a/nimbus/core/tx_pool/tx_tasks/tx_dispose.nim +++ b/nimbus/core/tx_pool/tx_tasks/tx_dispose.nim @@ -109,7 +109,7 @@ proc disposeById*(xp: TxPoolRef; itemIDs: openArray[Hash32]; reason: TxInfo) ## unusable (e.g. with higher nonces for the same sender.) for itemID in itemIDs: let rcItem = xp.txDB.byItemID.eq(itemID) - if rcItem.isOK: + if rcItem.isOk: discard xp.txDB.dispose(rcItem.value, reason) # ------------------------------------------------------------------------------ diff --git a/nimbus/core/validate.nim b/nimbus/core/validate.nim index e7933e5e92..9a60ad0fab 100644 --- a/nimbus/core/validate.nim +++ b/nimbus/core/validate.nim @@ -15,7 +15,6 @@ import ../db/ledger, ../common/common, ../transaction/call_types, - ../errors, ../transaction, ../utils/utils, "."/[dao, eip4844, gaslimit, withdrawals], @@ -127,18 +126,11 @@ proc validateUncles(com: CommonRef; header: Header; else: uncleSet.incl uncleHash - let chainDB = com.db - let recentAncestorHashes = try: - chainDB.getAncestorsHashes(MAX_UNCLE_DEPTH + 1, header) - except CatchableError as err: - return err("Block not present in database") - - let recentUncleHashes = try: - chainDB.getUncleHashes(recentAncestorHashes) - except CatchableError as err: - return err("Ancenstors not present in database") - - let blockHash = header.blockHash + let + chainDB = com.db + recentAncestorHashes = ?chainDB.getAncestorsHashes(MAX_UNCLE_DEPTH + 1, header) + recentUncleHashes = ?chainDB.getUncleHashes(recentAncestorHashes) + blockHash = header.blockHash for uncle in uncles: let uncleHash = uncle.blockHash @@ -164,17 +156,11 @@ proc validateUncles(com: CommonRef; header: Header; return err("uncle block number larger than current block number") # check uncle against own parent - var parent: Header - if not chainDB.getBlockHeader(uncle.parentHash,parent): - return err("Uncle's parent has gone missing") + let parent = ?chainDB.getBlockHeader(uncle.parentHash) if uncle.timestamp <= parent.timestamp: return err("Uncle's parent must me older") - let uncleParent = try: - chainDB.getBlockHeader(uncle.parentHash) - except BlockNotFound: - return err("Uncle parent not found") - + let uncleParent = ?chainDB.getBlockHeader(uncle.parentHash) ? com.validateHeader( Block.init(uncle, BlockBody()), uncleParent, checkSealOK) diff --git a/nimbus/db/aristo.nim b/nimbus/db/aristo.nim index 7d36becb03..8914e70847 100644 --- a/nimbus/db/aristo.nim +++ b/nimbus/db/aristo.nim @@ -32,7 +32,6 @@ export leftPairs, # iterators rightPairs, rightPairsAccount, - rightPairsGeneric, rightPairsStorage import diff --git a/nimbus/db/aristo/aristo_api.nim b/nimbus/db/aristo/aristo_api.nim index 7f81f8edf7..d19666bbd0 100644 --- a/nimbus/db/aristo/aristo_api.nim +++ b/nimbus/db/aristo/aristo_api.nim @@ -57,27 +57,6 @@ type ## this leaf entry referres to a storage tree, this one will be deleted ## as well. - AristoApiDeleteGenericDataFn* = - proc(db: AristoDbRef; - root: VertexID; - path: openArray[byte]; - ): Result[bool,AristoError] - {.noRaise.} - ## Delete the leaf data entry addressed by the argument `path`. The MPT - ## sub-tree the leaf data entry is subsumed under is passed as argument - ## `root` which must be greater than `VertexID(1)` and smaller than - ## `LEAST_FREE_VID`. - ## - ## The return value is `true` if the argument `path` deleted was the last - ## one and the tree does not exist anymore. - - AristoApiDeleteGenericTreeFn* = - proc(db: AristoDbRef; - root: VertexID; - ): Result[void,AristoError] - {.noRaise.} - ## Variant of `deleteGenericData()` for purging the whole MPT sub-tree. - AristoApiDeleteStorageDataFn* = proc(db: AristoDbRef; accPath: Hash32; @@ -114,37 +93,17 @@ type {.noRaise.} ## Fetch an account record from the database indexed by `accPath`. - AristoApiFetchAccountStateRootFn* = + AristoApiFetchStateRootFn* = proc(db: AristoDbRef; - updateOk: bool; ): Result[Hash32,AristoError] {.noRaise.} - ## Fetch the Merkle hash of the account root. Force update if the - ## argument `updateOK` is set `true`. - - AristoApiFetchGenericDataFn* = - proc(db: AristoDbRef; - root: VertexID; - path: openArray[byte]; - ): Result[seq[byte],AristoError] - {.noRaise.} - ## For a generic sub-tree starting at `root`, fetch the data record - ## indexed by `path`. - - AristoApiFetchGenericStateFn* = - proc(db: AristoDbRef; - root: VertexID; - updateOk: bool; - ): Result[Hash32,AristoError] - {.noRaise.} - ## Fetch the Merkle hash of the argument `root`. Force update if the - ## argument `updateOK` is set `true`. + ## Fetch the Merkle hash of the account root. AristoApiFetchStorageDataFn* = proc(db: AristoDbRef; accPath: Hash32; stoPath: Hash32; - ): Result[Uint256,AristoError] + ): Result[UInt256,AristoError] {.noRaise.} ## For a storage tree related to account `accPath`, fetch the data ## record from the database indexed by `stoPath`. @@ -152,11 +111,9 @@ type AristoApiFetchStorageRootFn* = proc(db: AristoDbRef; accPath: Hash32; - updateOk: bool; ): Result[Hash32,AristoError] {.noRaise.} - ## Fetch the Merkle hash of the storage root related to `accPath`. Force - ## update if the argument `updateOK` is set `true`. + ## Fetch the Merkle hash of the storage root related to `accPath`. AristoApiFindTxFn* = proc(db: AristoDbRef; @@ -245,15 +202,6 @@ type ## For an account record indexed by `accPath` query whether this record ## exists on the database. - AristoApiHasPathGenericFn* = - proc(db: AristoDbRef; - root: VertexID; - path: openArray[byte]; - ): Result[bool,AristoError] - {.noRaise.} - ## For a generic sub-tree starting at `root` and indexed by `path`, - ## mquery whether this record exists on the database. - AristoApiHasPathStorageFn* = proc(db: AristoDbRef; accPath: Hash32; @@ -306,16 +254,6 @@ type ## not on the database already or the value differend from `accRec`, and ## `false` otherwise. - AristoApiMergeGenericDataFn* = - proc(db: AristoDbRef; - root: VertexID; - path: openArray[byte]; - data: openArray[byte]; - ): Result[bool,AristoError] - {.noRaise.} - ## Variant of `mergeXXX()` for generic sub-trees, i.e. for arguments - ## `root` greater than `VertexID(1)` and smaller than `LEAST_FREE_VID`. - AristoApiMergeStorageDataFn* = proc(db: AristoDbRef; accPath: Hash32; @@ -339,17 +277,6 @@ type ## ## Errors will only be returned for invalid paths. - AristoApiPartGenericTwig* = - proc(db: AristoDbRef; - root: VertexID; - path: openArray[byte]; - ): Result[(seq[seq[byte]],bool), AristoError] - {.noRaise.} - ## Variant of `partAccountTwig()`. - ## - ## Note: This function provides a functionality comparable to the - ## `getBranch()` function from `hexary.nim` - AristoApiPartStorageTwig* = proc(db: AristoDbRef; accPath: Hash32; @@ -488,17 +415,13 @@ type commit*: AristoApiCommitFn deleteAccountRecord*: AristoApiDeleteAccountRecordFn - deleteGenericData*: AristoApiDeleteGenericDataFn - deleteGenericTree*: AristoApiDeleteGenericTreeFn deleteStorageData*: AristoApiDeleteStorageDataFn deleteStorageTree*: AristoApiDeleteStorageTreeFn fetchLastSavedState*: AristoApiFetchLastSavedStateFn fetchAccountRecord*: AristoApiFetchAccountRecordFn - fetchAccountStateRoot*: AristoApiFetchAccountStateRootFn - fetchGenericData*: AristoApiFetchGenericDataFn - fetchGenericState*: AristoApiFetchGenericStateFn + fetchStateRoot*: AristoApiFetchStateRootFn fetchStorageData*: AristoApiFetchStorageDataFn fetchStorageRoot*: AristoApiFetchStorageRootFn @@ -507,7 +430,6 @@ type forget*: AristoApiForgetFn forkTx*: AristoApiForkTxFn hasPathAccount*: AristoApiHasPathAccountFn - hasPathGeneric*: AristoApiHasPathGenericFn hasPathStorage*: AristoApiHasPathStorageFn hasStorageData*: AristoApiHasStorageDataFn @@ -516,11 +438,9 @@ type nForked*: AristoApiNForkedFn mergeAccountRecord*: AristoApiMergeAccountRecordFn - mergeGenericData*: AristoApiMergeGenericDataFn mergeStorageData*: AristoApiMergeStorageDataFn partAccountTwig*: AristoApiPartAccountTwig - partGenericTwig*: AristoApiPartGenericTwig partStorageTwig*: AristoApiPartStorageTwig partUntwigGeneric*: AristoApiPartUntwigGeneric partUntwigGenericOk*: AristoApiPartUntwigGenericOk @@ -542,17 +462,13 @@ type AristoApiProfCommitFn = "commit" AristoApiProfDeleteAccountRecordFn = "deleteAccountRecord" - AristoApiProfDeleteGenericDataFn = "deleteGnericData" - AristoApiProfDeleteGenericTreeFn = "deleteGnericTree" AristoApiProfDeleteStorageDataFn = "deleteStorageData" AristoApiProfDeleteStorageTreeFn = "deleteStorageTree" AristoApiProfFetchLastSavedStateFn = "fetchLastSavedState" AristoApiProfFetchAccountRecordFn = "fetchAccountRecord" - AristoApiProfFetchAccountStateRootFn = "fetchAccountStateRoot" - AristoApiProfFetchGenericDataFn = "fetchGenericData" - AristoApiProfFetchGenericStateFn = "fetchGenericState" + AristoApiProfFetchStateRootFn = "fetchStateRoot" AristoApiProfFetchStorageDataFn = "fetchStorageData" AristoApiProfFetchStorageRootFn = "fetchStorageRoot" @@ -562,7 +478,6 @@ type AristoApiProfForkTxFn = "forkTx" AristoApiProfHasPathAccountFn = "hasPathAccount" - AristoApiProfHasPathGenericFn = "hasPathGeneric" AristoApiProfHasPathStorageFn = "hasPathStorage" AristoApiProfHasStorageDataFn = "hasStorageData" @@ -571,14 +486,10 @@ type AristoApiProfNForkedFn = "nForked" AristoApiProfMergeAccountRecordFn = "mergeAccountRecord" - AristoApiProfMergeGenericDataFn = "mergeGenericData" AristoApiProfMergeStorageDataFn = "mergeStorageData" AristoApiProfPartAccountTwigFn = "partAccountTwig" - AristoApiProfPartGenericTwigFn = "partGenericTwig" AristoApiProfPartStorageTwigFn = "partStorageTwig" - AristoApiProfPartUntwigGenericFn = "partUntwigGeneric" - AristoApiProfPartUntwigGenericOkFn = "partUntwigGenericOk" AristoApiProfPartUntwigPathFn = "partUntwigPath" AristoApiProfPartUntwigPathOkFn = "partUntwigPathOk" @@ -595,7 +506,6 @@ type AristoApiProfBeGetTuvFn = "be/getTuv" AristoApiProfBeGetLstFn = "be/getLst" AristoApiProfBePutVtxFn = "be/putVtx" - AristoApiProfBePutKeyFn = "be/putKey" AristoApiProfBePutTuvFn = "be/putTuv" AristoApiProfBePutLstFn = "be/putLst" AristoApiProfBePutEndFn = "be/putEnd" @@ -614,17 +524,13 @@ when AutoValidateApiHooks: doAssert not api.commit.isNil doAssert not api.deleteAccountRecord.isNil - doAssert not api.deleteGenericData.isNil - doAssert not api.deleteGenericTree.isNil doAssert not api.deleteStorageData.isNil doAssert not api.deleteStorageTree.isNil doAssert not api.fetchLastSavedState.isNil doAssert not api.fetchAccountRecord.isNil - doAssert not api.fetchAccountStateRoot.isNil - doAssert not api.fetchGenericData.isNil - doAssert not api.fetchGenericState.isNil + doAssert not api.fetchStateRoot.isNil doAssert not api.fetchStorageData.isNil doAssert not api.fetchStorageRoot.isNil @@ -634,7 +540,6 @@ when AutoValidateApiHooks: doAssert not api.forkTx.isNil doAssert not api.hasPathAccount.isNil - doAssert not api.hasPathGeneric.isNil doAssert not api.hasPathStorage.isNil doAssert not api.hasStorageData.isNil @@ -643,14 +548,10 @@ when AutoValidateApiHooks: doAssert not api.nForked.isNil doAssert not api.mergeAccountRecord.isNil - doAssert not api.mergeGenericData.isNil doAssert not api.mergeStorageData.isNil doAssert not api.partAccountTwig.isNil - doAssert not api.partGenericTwig.isNil doAssert not api.partStorageTwig.isNil - doAssert not api.partUntwigGeneric.isNil - doAssert not api.partUntwigGenericOk.isNil doAssert not api.partUntwigPath.isNil doAssert not api.partUntwigPathOk.isNil @@ -690,17 +591,13 @@ func init*(api: var AristoApiObj) = api.commit = commit api.deleteAccountRecord = deleteAccountRecord - api.deleteGenericData = deleteGenericData - api.deleteGenericTree = deleteGenericTree api.deleteStorageData = deleteStorageData api.deleteStorageTree = deleteStorageTree api.fetchLastSavedState = fetchLastSavedState api.fetchAccountRecord = fetchAccountRecord - api.fetchAccountStateRoot = fetchAccountStateRoot - api.fetchGenericData = fetchGenericData - api.fetchGenericState = fetchGenericState + api.fetchStateRoot = fetchStateRoot api.fetchStorageData = fetchStorageData api.fetchStorageRoot = fetchStorageRoot @@ -710,7 +607,6 @@ func init*(api: var AristoApiObj) = api.forkTx = forkTx api.hasPathAccount = hasPathAccount - api.hasPathGeneric = hasPathGeneric api.hasPathStorage = hasPathStorage api.hasStorageData = hasStorageData @@ -719,14 +615,10 @@ func init*(api: var AristoApiObj) = api.nForked = nForked api.mergeAccountRecord = mergeAccountRecord - api.mergeGenericData = mergeGenericData api.mergeStorageData = mergeStorageData api.partAccountTwig = partAccountTwig - api.partGenericTwig = partGenericTwig api.partStorageTwig = partStorageTwig - api.partUntwigGeneric = partUntwigGeneric - api.partUntwigGenericOk = partUntwigGenericOk api.partUntwigPath = partUntwigPath api.partUntwigPathOk = partUntwigPathOk @@ -749,16 +641,12 @@ func dup*(api: AristoApiRef): AristoApiRef = commit: api.commit, deleteAccountRecord: api.deleteAccountRecord, - deleteGenericData: api.deleteGenericData, - deleteGenericTree: api.deleteGenericTree, deleteStorageData: api.deleteStorageData, deleteStorageTree: api.deleteStorageTree, fetchLastSavedState: api.fetchLastSavedState, fetchAccountRecord: api.fetchAccountRecord, - fetchAccountStateRoot: api.fetchAccountStateRoot, - fetchGenericData: api.fetchGenericData, - fetchGenericState: api.fetchGenericState, + fetchStateRoot: api.fetchStateRoot, fetchStorageData: api.fetchStorageData, fetchStorageRoot: api.fetchStorageRoot, @@ -768,7 +656,6 @@ func dup*(api: AristoApiRef): AristoApiRef = forkTx: api.forkTx, hasPathAccount: api.hasPathAccount, - hasPathGeneric: api.hasPathGeneric, hasPathStorage: api.hasPathStorage, hasStorageData: api.hasStorageData, @@ -777,14 +664,10 @@ func dup*(api: AristoApiRef): AristoApiRef = nForked: api.nForked, mergeAccountRecord: api.mergeAccountRecord, - mergeGenericData: api.mergeGenericData, mergeStorageData: api.mergeStorageData, partAccountTwig: api.partAccountTwig, - partGenericTwig: api.partGenericTwig, partStorageTwig: api.partStorageTwig, - partUntwigGeneric: api.partUntwigGeneric, - partUntwigGenericOk: api.partUntwigGenericOk, partUntwigPath: api.partUntwigPath, partUntwigPathOk: api.partUntwigPathOk, @@ -835,16 +718,6 @@ func init*( AristoApiProfDeleteAccountRecordFn.profileRunner: result = api.deleteAccountRecord(a, b) - profApi.deleteGenericData = - proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto = - AristoApiProfDeleteGenericDataFn.profileRunner: - result = api.deleteGenericData(a, b, c) - - profApi.deleteGenericTree = - proc(a: AristoDbRef; b: VertexID): auto = - AristoApiProfDeleteGenericTreeFn.profileRunner: - result = api.deleteGenericTree(a, b) - profApi.deleteStorageData = proc(a: AristoDbRef; b: Hash32, c: Hash32): auto = AristoApiProfDeleteStorageDataFn.profileRunner: @@ -865,20 +738,10 @@ func init*( AristoApiProfFetchAccountRecordFn.profileRunner: result = api.fetchAccountRecord(a, b) - profApi.fetchAccountStateRoot = + profApi.fetchStateRoot = proc(a: AristoDbRef; b: bool): auto = - AristoApiProfFetchAccountStateRootFn.profileRunner: - result = api.fetchAccountStateRoot(a, b) - - profApi.fetchGenericData = - proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto = - AristoApiProfFetchGenericDataFn.profileRunner: - result = api.fetchGenericData(a, b, c) - - profApi.fetchGenericState = - proc(a: AristoDbRef; b: VertexID; c: bool): auto = - AristoApiProfFetchGenericStateFn.profileRunner: - result = api.fetchGenericState(a, b, c) + AristoApiProfFetchStateRootFn.profileRunner: + result = api.fetchStateRoot(a, b) profApi.fetchStorageData = proc(a: AristoDbRef; b, stoPath: Hash32): auto = @@ -886,9 +749,9 @@ func init*( result = api.fetchStorageData(a, b, stoPath) profApi.fetchStorageRoot = - proc(a: AristoDbRef; b: Hash32; c: bool): auto = + proc(a: AristoDbRef; b: Hash32): auto = AristoApiProfFetchStorageRootFn.profileRunner: - result = api.fetchStorageRoot(a, b, c) + result = api.fetchStorageRoot(a, b) profApi.findTx = proc(a: AristoDbRef; b: RootedVertexID; c: HashKey): auto = @@ -915,11 +778,6 @@ func init*( AristoApiProfHasPathAccountFn.profileRunner: result = api.hasPathAccount(a, b) - profApi.hasPathGeneric = - proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto = - AristoApiProfHasPathGenericFn.profileRunner: - result = api.hasPathGeneric(a, b, c) - profApi.hasPathStorage = proc(a: AristoDbRef; b, c: Hash32): auto = AristoApiProfHasPathStorageFn.profileRunner: @@ -950,13 +808,8 @@ func init*( AristoApiProfMergeAccountRecordFn.profileRunner: result = api.mergeAccountRecord(a, b, c) - profApi.mergeGenericData = - proc(a: AristoDbRef; b: VertexID, c, d: openArray[byte]): auto = - AristoApiProfMergeGenericDataFn.profileRunner: - result = api.mergeGenericData(a, b, c, d) - profApi.mergeStorageData = - proc(a: AristoDbRef; b, c: Hash32, d: Uint256): auto = + proc(a: AristoDbRef; b, c: Hash32, d: UInt256): auto = AristoApiProfMergeStorageDataFn.profileRunner: result = api.mergeStorageData(a, b, c, d) @@ -965,26 +818,11 @@ func init*( AristoApiProfPartAccountTwigFn.profileRunner: result = api.partAccountTwig(a, b) - profApi.partGenericTwig = - proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto = - AristoApiProfPartGenericTwigFn.profileRunner: - result = api.partGenericTwig(a, b, c) - profApi.partStorageTwig = proc(a: AristoDbRef; b: Hash32; c: Hash32): auto = AristoApiProfPartStorageTwigFn.profileRunner: result = api.partStorageTwig(a, b, c) - profApi.partUntwigGeneric = - proc(a: openArray[seq[byte]]; b: Hash32; c: openArray[byte]): auto = - AristoApiProfPartUntwigGenericFn.profileRunner: - result = api.partUntwigGeneric(a, b, c) - - profApi.partUntwigGenericOk = - proc(a: openArray[seq[byte]]; b:Hash32; c:openArray[byte]; d:Opt[seq[byte]]): auto = - AristoApiProfPartUntwigGenericOkFn.profileRunner: - result = api.partUntwigGenericOk(a, b, c, d) - profApi.partUntwigPath = proc(a: openArray[seq[byte]]; b, c: Hash32): auto = AristoApiProfPartUntwigPathFn.profileRunner: @@ -1065,12 +903,6 @@ func init*( be.putVtxFn(a, b, c) data.list[AristoApiProfBePutVtxFn.ord].masked = true - beDup.putKeyFn = - proc(a: PutHdlRef; b: RootedVertexID, c: HashKey) = - AristoApiProfBePutKeyFn.profileRunner: - be.putKeyFn(a, b, c) - data.list[AristoApiProfBePutKeyFn.ord].masked = true - beDup.putTuvFn = proc(a: PutHdlRef; b: VertexID) = AristoApiProfBePutTuvFn.profileRunner: diff --git a/nimbus/db/aristo/aristo_blobify.nim b/nimbus/db/aristo/aristo_blobify.nim index eb3ea75b26..98d287ee49 100644 --- a/nimbus/db/aristo/aristo_blobify.nim +++ b/nimbus/db/aristo/aristo_blobify.nim @@ -68,10 +68,12 @@ proc deblobify*[T: uint64|VertexID](data: openArray[byte], _: type T): Result[T, if data.len < 1 or data.len > 8: return err(Deblob64LenUnsupported) - var tmp: array[8, byte] - discard tmp.toOpenArray(8 - data.len, 7).copyFrom(data) + var tmp = 0'u64 + let start = 8 - data.len + for i in 0.. 32: @@ -124,10 +126,6 @@ proc load256(data: openArray[byte]; start: var int, len: int): Result[UInt256,Ar proc blobifyTo*(pyl: LeafPayload, data: var seq[byte]) = case pyl.pType - of RawData: - data &= pyl.rawBlob - data &= [0x10.byte] - of AccountData: # `lens` holds `len-1` since `mask` filters out the zero-length case (which # allows saving 1 bit per length) @@ -161,21 +159,22 @@ proc blobifyTo*(pyl: LeafPayload, data: var seq[byte]) = data &= pyl.stoData.blobify().data data &= [0x20.byte] -proc blobifyTo*(vtx: VertexRef; data: var seq[byte]): Result[void,AristoError] = +proc blobifyTo*(vtx: VertexRef; key: HashKey, data: var seq[byte]): Result[void,AristoError] = ## This function serialises the vertex argument to a database record. ## Contrary to RLP based serialisation, these records aim to align on ## fixed byte boundaries. ## :: ## Branch: + ## -- optional hash key ## [VertexID, ..] -- list of up to 16 child vertices lookup keys ## seq[byte] -- hex encoded partial path (non-empty for extension nodes) ## uint64 -- lengths of each child vertex, each taking 4 bits - ## 0x80 + xx -- marker(2) + pathSegmentLen(6) + ## 0x80 + xx -- marker(0/2) + pathSegmentLen(6) ## ## Leaf: ## seq[byte] -- opaque leaf data payload (might be zero length) ## seq[byte] -- hex encoded partial path (at least one byte) - ## 0xc0 + yy -- marker(2) + partialPathLen(6) + ## 0xc0 + yy -- marker(3) + partialPathLen(6) ## ## For a branch record, the bytes of the `access` array indicate the position ## of the Patricia Trie vertex reference. So the `vertexID` with index `n` has @@ -186,6 +185,13 @@ proc blobifyTo*(vtx: VertexRef; data: var seq[byte]): Result[void,AristoError] = return err(BlobifyNilVertex) case vtx.vType: of Branch: + let code = if key.isValid: + data.add byte(key.len) + data.add key.data() + # TODO using 0 here for legacy reasons - a bit flag would be easier + 0'u8 shl 6 + else: + 2'u8 shl 6 var lens = 0u64 pos = data.len @@ -209,7 +215,7 @@ proc blobifyTo*(vtx: VertexRef; data: var seq[byte]): Result[void,AristoError] = data &= pSegm.data() data &= lens.toBytesBE - data &= [0x80u8 or psLen] + data &= [code or psLen] of Leaf: let @@ -219,14 +225,14 @@ proc blobifyTo*(vtx: VertexRef; data: var seq[byte]): Result[void,AristoError] = return err(BlobifyLeafPathOverflow) vtx.lData.blobifyTo(data) data &= pSegm.data() - data &= [0xC0u8 or psLen] + data &= [(3'u8 shl 6) or psLen] ok() -proc blobify*(vtx: VertexRef): seq[byte] = +proc blobify*(vtx: VertexRef, key: HashKey): seq[byte] = ## Variant of `blobify()` result = newSeqOfCap[byte](128) - if vtx.blobifyTo(result).isErr: + if vtx.blobifyTo(key, result).isErr: result.setLen(0) # blobify only fails on invalid verticies proc blobifyTo*(lSst: SavedState; data: var seq[byte]): Result[void,AristoError] = @@ -248,45 +254,42 @@ proc deblobify( pyl: var LeafPayload; ): Result[void,AristoError] = if data.len == 0: - pyl = LeafPayload(pType: RawData) - return ok() + return err(DeblobVtxTooShort) let mask = data[^1] - if (mask and 0x10) > 0: # unstructured payload - pyl = LeafPayload(pType: RawData, rawBlob: data[0 .. ^2]) - return ok() - if (mask and 0x20) > 0: # Slot storage data pyl = LeafPayload( pType: StoData, stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256)) - return ok() + ok() + elif (mask and 0xf0) == 0: # Only account fields set + pyl = LeafPayload(pType: AccountData) + var + start = 0 + lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2)) - pyl = LeafPayload(pType: AccountData) - var - start = 0 - lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2)) + if (mask and 0x01) > 0: + let len = lens and 0b111 + pyl.account.nonce = ? load64(data, start, int(len + 1)) - if (mask and 0x01) > 0: - let len = lens and 0b111 - pyl.account.nonce = ? load64(data, start, int(len + 1)) + if (mask and 0x02) > 0: + let len = (lens shr 3) and 0b11111 + pyl.account.balance = ? load256(data, start, int(len + 1)) - if (mask and 0x02) > 0: - let len = (lens shr 3) and 0b11111 - pyl.account.balance = ? load256(data, start, int(len + 1)) + if (mask and 0x04) > 0: + let len = (lens shr 8) and 0b111 + pyl.stoID = (true, VertexID(? load64(data, start, int(len + 1)))) - if (mask and 0x04) > 0: - let len = (lens shr 8) and 0b111 - pyl.stoID = (true, VertexID(? load64(data, start, int(len + 1)))) + if (mask and 0x08) > 0: + if data.len() < start + 32: + return err(DeblobCodeLenUnsupported) + discard pyl.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31)) + else: + pyl.account.codeHash = EMPTY_CODE_HASH - if (mask and 0x08) > 0: - if data.len() < start + 32: - return err(DeblobCodeLenUnsupported) - discard pyl.account.codeHash.data.copyFrom(data.toOpenArray(start, start + 31)) + ok() else: - pyl.account.codeHash = EMPTY_CODE_HASH - - ok() + err(DeblobUnknown) proc deblobifyType*(record: openArray[byte]; T: type VertexRef): Result[VertexType, AristoError] = @@ -294,7 +297,7 @@ proc deblobifyType*(record: openArray[byte]; T: type VertexRef): return err(DeblobVtxTooShort) ok case record[^1] shr 6: - of 2: Branch + of 0, 2: Branch of 3: Leaf else: return err(DeblobUnknown) @@ -307,16 +310,20 @@ proc deblobify*( ## argument `vtx` can be `nil`. if record.len < 3: # minimum `Leaf` record return err(DeblobVtxTooShort) - - ok case record[^1] shr 6: - of 2: # `Branch` vertex - if record.len < 11: # at least two edges + let kind = record[^1] shr 6 + let start = if kind == 0: + int(record[0] + 1) + else: + 0 + ok case kind: + of 0, 2: # `Branch` vertex + if record.len - start < 11: # at least two edges return err(DeblobBranchTooShort) let aInx = record.len - 9 aIny = record.len - 2 var - offs = 0 + offs = start lens = uint64.fromBytesBE record.toOpenArray(aInx, aIny) # bitmap vtxList: array[16,VertexID] n = 0 @@ -353,12 +360,18 @@ proc deblobify*( vType: Leaf, pfx: pathSegment) - ? record.toOpenArray(0, pLen - 1).deblobify(vtx.lData) + ? record.toOpenArray(start, pLen - 1).deblobify(vtx.lData) vtx else: return err(DeblobUnknown) +proc deblobify*(record: openArray[byte], T: type HashKey): Opt[HashKey] = + if record.len > 1 and ((record[^1] shr 6) == 0) and (int(record[0]) + 1) < record.len: + HashKey.fromBytes(record.toOpenArray(1, int(record[0]))) + else: + Opt.none(HashKey) + proc deblobify*( data: openArray[byte]; T: type SavedState; diff --git a/nimbus/db/aristo/aristo_check/check_be.nim b/nimbus/db/aristo/aristo_check/check_be.nim index 6bee548d42..e7d98f2295 100644 --- a/nimbus/db/aristo/aristo_check/check_be.nim +++ b/nimbus/db/aristo/aristo_check/check_be.nim @@ -52,7 +52,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef]( for (rvid,key) in T.walkKeyBe db: if topVidBe.vid < rvid.vid: topVidBe = rvid - let vtx = db.getVtxBE(rvid).valueOr: + let _ = db.getVtxBE(rvid).valueOr: return err((rvid.vid,CheckBeVtxMissing)) # Compare calculated `vTop` against database state diff --git a/nimbus/db/aristo/aristo_compute.nim b/nimbus/db/aristo/aristo_compute.nim index 41e868b342..8bbb6c29ca 100644 --- a/nimbus/db/aristo/aristo_compute.nim +++ b/nimbus/db/aristo/aristo_compute.nim @@ -120,14 +120,18 @@ proc flush(batch: var WriteBatch, db: AristoDbRef): Result[void, AristoError] = batch.writer = nil ok() -proc putKey( - batch: var WriteBatch, db: AristoDbRef, rvid: RootedVertexID, key: HashKey +proc putVtx( + batch: var WriteBatch, + db: AristoDbRef, + rvid: RootedVertexID, + vtx: VertexRef, + key: HashKey, ): Result[void, AristoError] = if batch.writer == nil: doAssert db.backend != nil, "source data is from the backend" batch.writer = ?db.backend.putBegFn() - db.backend.putKeyFn(batch.writer, rvid, key) + db.backend.putVtxFn(batch.writer, rvid, vtx, key) batch.count += 1 ok() @@ -150,6 +154,7 @@ func leave(batch: var WriteBatch, nibble: int) = proc putKeyAtLevel( db: AristoDbRef, rvid: RootedVertexID, + vtx: VertexRef, key: HashKey, level: int, batch: var WriteBatch, @@ -159,10 +164,8 @@ proc putKeyAtLevel( ## set (vertex data may have been committed to disk without computing the ## corresponding hash!) - # Only put computed keys in the database which keeps churn down by focusing on - # the ones that do not change! if level == -2: - ?batch.putKey(db, rvid, key) + ?batch.putVtx(db, rvid, vtx, key) if batch.count mod batchSize == 0: ?batch.flush(db) @@ -172,6 +175,7 @@ proc putKeyAtLevel( else: debug "Writing computeKey cache", keys = batch.count, accounts = batch.progress else: + db.deltaAtLevel(level).sTab[rvid] = vtx db.deltaAtLevel(level).kMap[rvid] = key ok() @@ -215,11 +219,11 @@ proc computeKeyImpl( # empty state if bloom == nil or bloom[].query(uint64(rvid.vid)): db.getKeyRc(rvid).isErrOr: + # Value cached either in layers or database return ok value let (vtx, vl) = ?db.getVtxRc(rvid, {GetVtxFlag.PeekCache}) - # Top-most level of all the verticies this hash compution depends on var level = vl @@ -249,8 +253,6 @@ proc computeKeyImpl( storageRoot: skey.to(Hash32), codeHash: vtx.lData.account.codeHash, ) - of RawData: - vtx.lData.rawBlob of StoData: # TODO avoid memory allocation when encoding storage data rlp.encode(vtx.lData.stoData) @@ -281,8 +283,9 @@ proc computeKeyImpl( # likely to live in an in-memory layer since any leaf change will lead to the # root key also changing while leaves that have never been hashed will see # their hash being saved directly to the backend. - ?db.putKeyAtLevel(rvid, key, level, batch) + if vtx.vType != Leaf: + ?db.putKeyAtLevel(rvid, vtx, key, level, batch) ok (key, level) proc computeKeyImpl( @@ -310,16 +313,11 @@ proc computeKey*( ## state/hash, it must be converted to a `Hash32` (using (`.to(Hash32)`) as ## in `db.computeKey(rvid).value.to(Hash32)` which always results in a ## 32 byte value. - computeKeyImpl(db, rvid, nil) proc computeLeafKeysImpl( T: type, db: AristoDbRef, root: VertexID ): Result[void, AristoError] = - for x in T.walkKeyBe(db): - debug "Skipping leaf key computation, cache is not empty" - return ok() - # Key computation function that works by iterating over the entries in the # database (instead of traversing trie using point lookups) - due to how # rocksdb is organised, this cache-friendly traversal order turns out to be @@ -328,6 +326,27 @@ proc computeLeafKeysImpl( # branches whose children were computed in the previous round one "layer" # at a time until the the number of successfully computed nodes grows low. # TODO progress indicator + + block: + if db.getKeyUbe((root, root)).isOk(): + return ok() # Fast path for when the root is in the database already + + # Smoke check to see if we can find lots of branch nodes with keys already + var branches, found: int + for (rvid, vtx) in T.walkVtxBe(db, {Branch}): + branches += 1 + + if db.getKeyUbe(rvid).isOk: + found += 1 + + # 10% found on the initial sample.. good enough? Some more randomness + # here would maybe make sense + if branches > 1000: + if found * 10 > branches: + return ok() + break + + info "Writing key cache (this may take a while)" var batch: WriteBatch @@ -342,13 +361,18 @@ proc computeLeafKeysImpl( # Reuse rlp writers to avoid superfluous memory allocations writer = initRlpWriter() writer2 = initRlpWriter() + writer3 = initRlpWriter() level = 0 - - # Start with leaves - at the time of writing, this is roughly 3/4 of the - # of the entries in the database on mainnet - the ratio roughly corresponds to - # the fill ratio of the deepest branch nodes as nodes close to the MPT root - # don't come in significant numbers - + leaves = 0 + + # Load leaves into bloom filter so we can quickly skip over branch nodes where + # we know the lookup will fail. + # At the time of writing, this is roughly 3/4 of the of the entries in the + # database on mainnet - the ratio roughly corresponds to the fill ratio of the + # deepest branch nodes as nodes close to the MPT root don't come in + # significant numbers + # Leaf keys are not computed to save space - instead, if they are needed they + # are computed from the leaf data. for (rvid, vtx) in T.walkVtxBe(db, {Leaf}): if vtx.lData.pType == AccountData and vtx.lData.stoID.isValid: # Accounts whose key depends on the storage trie typically will not yet @@ -357,46 +381,14 @@ proc computeLeafKeysImpl( # be computed and then top up during regular trie traversal. continue - writer.clear() - - let key = writer.encodeLeaf(vtx.pfx): - case vtx.lData.pType - of AccountData: - writer2.clear() - writer2.append Account( - nonce: vtx.lData.account.nonce, - balance: vtx.lData.account.balance, - # Accounts with storage filtered out above - storageRoot: default(Hash32), - codeHash: vtx.lData.account.codeHash, - ) - writer2.finish() - of RawData: - vtx.lData.rawBlob - of StoData: - writer2.clear() - writer2.append(vtx.lData.stoData) - writer2.finish() - - ?batch.putKey(db, rvid, key) - - if batch.count mod batchSize == 0: - ?batch.flush(db) - - if batch.count mod (batchSize * 100) == 0: - info "Writing leaves", keys = batch.count, level - else: - debug "Writing leaves", keys = batch.count, level - bloom.insert(uint64(rvid.vid)) + leaves += 1 - let leaves = batch.count - - # The leaves have been written - we'll now proceed to branches expecting - # diminishing returns for each layer - not only beacuse there are fewer nodes - # closer to the root in the trie but also because leaves we skipped over lead - # larger and larger branch gaps and the advantage of iterating in disk order - # is lost + # The leaves have been loaded into the bloom filter - we'll now proceed to + # branches expecting diminishing returns for each layer - not only beacuse + # there are fewer nodes closer to the root in the trie but also because leaves + # we skipped over lead larger and larger branch gaps and the advantage of + # iterating in disk order is lost var lastRound = leaves level += 1 @@ -404,12 +396,17 @@ proc computeLeafKeysImpl( # 16*16 looks like "2 levels of MPT" but in reality, the branch nodes close # to the leaves are sparse - on average about 4 nodes per branch on mainnet - # meaning that we'll do 3-4 levels of branch depending on the network + var branches = 0 while lastRound > (leaves div (16 * 16)): info "Starting branch layer", keys = batch.count, lastRound, level var round = 0 + branches = 0 + for (rvid, vtx) in T.walkVtxBe(db, {Branch}): + branches += 1 + if vtx.pfx.len > 0: - # TODO there shouldn't be many of these - is it worth the lookup? + # TODO there shouldn't be many extension nodes - is it worth the lookup? continue if level > 1: @@ -431,14 +428,50 @@ proc computeLeafKeysImpl( let key = writer.encodeBranch: let vid = vtx.bVid[n] if vid.isValid: - let bkey = db.getKeyUbe((rvid.root, vid)).valueOr: - # False positive on the bloom filter lookup - break branchKey - bkey + let bkeyOpt = + if level == 1: # No leaf keys in database + Result[HashKey, AristoError].err(GetKeyNotFound) + else: + db.getKeyUbe((rvid.root, vid)) + bkeyOpt.valueOr: + let bvtx = db.getVtxUbe((rvid.root, vid)).valueOr: + # Incomplete database? + break branchKey + + if bvtx == nil or ( + bvtx.vType == Leaf and bvtx.lData.pType == AccountData and + bvtx.lData.stoID.isValid + ): + # It's unlikely storage root key has been computed already, so + # skip + # TODO maybe worth revisting - a not insignificant number of + # contracts have only a leaf storage slot so for those we + # could trivially compute account storage root.. + break branchKey + case bvtx.vType + of Leaf: + writer2.clear() + + writer2.encodeLeaf(bvtx.pfx): + writer3.clear() + case bvtx.lData.pType + of AccountData: + writer3.append Account( + nonce: bvtx.lData.account.nonce, + balance: bvtx.lData.account.balance, + # Accounts with storage filtered out above + storageRoot: EMPTY_ROOT_HASH, + codeHash: bvtx.lData.account.codeHash, + ) + of StoData: + writer3.append(bvtx.lData.stoData) + writer3.finish() + of Branch: + break branchKey else: VOID_HASH_KEY - ?batch.putKey(db, rvid, key) + ?batch.putVtx(db, rvid, vtx, key) if batch.count mod batchSize == 0: ?batch.flush(db) @@ -456,7 +489,7 @@ proc computeLeafKeysImpl( ?batch.flush(db) info "Key cache base written", - keys = batch.count, lastRound, leaves, branches = batch.count - leaves + keys = batch.count, lastRound, leaves, branches let rc = computeKeyImpl(db, (root, root), addr bloom) if rc.isOk() or rc.error() == GetVtxNotFound: @@ -474,6 +507,7 @@ proc computeKeys*(db: AristoDbRef, root: VertexID): Result[void, AristoError] = ## ## This implementation speeds up the inital seeding of the cache by traversing ## the full state in on-disk order and computing hashes bottom-up instead. + case db.backend.kind of BackendMemory: MemBackendRef.computeLeafKeysImpl db, root diff --git a/nimbus/db/aristo/aristo_debug.nim b/nimbus/db/aristo/aristo_debug.nim index ab64e9cdb2..1b49c43f9b 100644 --- a/nimbus/db/aristo/aristo_debug.nim +++ b/nimbus/db/aristo/aristo_debug.nim @@ -180,8 +180,6 @@ func ppAriAccount(a: AristoAccount): string = func ppPayload(p: LeafPayload, db: AristoDbRef): string = case p.pType: - of RawData: - result &= p.rawBlob.toHex.squeeze(hex=true) of AccountData: result = "(" & p.account.ppAriAccount() & "," & p.stoID.ppVid & ")" of StoData: diff --git a/nimbus/db/aristo/aristo_delete.nim b/nimbus/db/aristo/aristo_delete.nim index a466f9252e..504609f20f 100644 --- a/nimbus/db/aristo/aristo_delete.nim +++ b/nimbus/db/aristo/aristo_delete.nim @@ -18,7 +18,7 @@ import std/typetraits, eth/common, results, - ./aristo_delete/[delete_helpers, delete_subtree], + ./aristo_delete/delete_subtree, "."/[aristo_desc, aristo_fetch, aristo_get, aristo_hike, aristo_layers] # ------------------------------------------------------------------------------ @@ -57,7 +57,7 @@ proc deleteImpl( if lf.vtx.vType != Leaf: return err(DelLeafExpexted) - db.disposeOfVtx((hike.root, lf.vid)) + db.layersResVtx((hike.root, lf.vid)) if hike.legs.len == 1: # This was the last node in the trie, meaning we don't have any branches or @@ -75,8 +75,8 @@ proc deleteImpl( # Clear all Merkle hash keys up to the root key for n in 0 .. hike.legs.len - 2: - let vid = hike.legs[n].wp.vid - db.layersResKey((hike.root, vid)) + let wp = hike.legs[n].wp + db.layersResKey((hike.root, wp.vid), wp.vtx) if 0 <= nbl: # Branch has only one entry - move that entry to where the branch was and @@ -89,7 +89,7 @@ proc deleteImpl( if not nxt.isValid: return err(DelVidStaleVtx) - db.disposeOfVtx((hike.root, vid)) + db.layersResVtx((hike.root, vid)) let vtx = case nxt.vType diff --git a/nimbus/db/aristo/aristo_delete/delete_helpers.nim b/nimbus/db/aristo/aristo_delete/delete_helpers.nim deleted file mode 100644 index f946d069e1..0000000000 --- a/nimbus/db/aristo/aristo_delete/delete_helpers.nim +++ /dev/null @@ -1,25 +0,0 @@ -# nimbus-eth1 -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -{.push raises: [].} - -import - ".."/[aristo_desc, aristo_layers] - - -proc disposeOfVtx*( - db: AristoDbRef; # Database, top layer - rvid: RootedVertexID; # Vertex ID to clear - ) = - # Remove entry - db.layersResVtx(rvid) - db.layersResKey(rvid) - -# End diff --git a/nimbus/db/aristo/aristo_delete/delete_subtree.nim b/nimbus/db/aristo/aristo_delete/delete_subtree.nim index 47b68b4d26..cbbdb56942 100644 --- a/nimbus/db/aristo/aristo_delete/delete_subtree.nim +++ b/nimbus/db/aristo/aristo_delete/delete_subtree.nim @@ -12,8 +12,7 @@ import eth/common, - ".."/[aristo_desc, aristo_get, aristo_layers], - ./delete_helpers + ".."/[aristo_desc, aristo_get, aristo_layers] # ------------------------------------------------------------------------------ # Private heplers @@ -34,7 +33,7 @@ proc delSubTreeNow( if vtx.bVid[n].isValid: ? db.delSubTreeNow((rvid.root,vtx.bVid[n])) - db.disposeOfVtx(rvid) + db.layersResVtx(rvid) ok() @@ -64,7 +63,7 @@ proc delStoTreeNow( let stoPath = Hash32((stoPath & vtx.pfx).getBytes()) db.layersPutStoLeaf(mixUp(accPath, stoPath), nil) - db.disposeOfVtx(rvid) + db.layersResVtx(rvid) ok() diff --git a/nimbus/db/aristo/aristo_delta.nim b/nimbus/db/aristo/aristo_delta.nim index c332473d66..a8c232c4b1 100644 --- a/nimbus/db/aristo/aristo_delta.nim +++ b/nimbus/db/aristo/aristo_delta.nim @@ -95,9 +95,11 @@ proc deltaPersistent*( # Store structural single trie entries let writeBatch = ? be.putBegFn() for rvid, vtx in db.balancer.sTab: - be.putVtxFn(writeBatch, rvid, vtx) - for rvid, key in db.balancer.kMap: - be.putKeyFn(writeBatch, rvid, key) + db.balancer.kMap.withValue(rvid, key) do: + be.putVtxFn(writeBatch, rvid, vtx, key[]) + do: + be.putVtxFn(writeBatch, rvid, vtx, default(HashKey)) + be.putTuvFn(writeBatch, db.balancer.vTop) be.putLstFn(writeBatch, lSst) ? be.putEndFn writeBatch # Finalise write batch diff --git a/nimbus/db/aristo/aristo_desc/desc_backend.nim b/nimbus/db/aristo/aristo_desc/desc_backend.nim index e59623e779..6a144cbe0b 100644 --- a/nimbus/db/aristo/aristo_desc/desc_backend.nim +++ b/nimbus/db/aristo/aristo_desc/desc_backend.nim @@ -52,17 +52,11 @@ type ## Generic transaction initialisation function PutVtxFn* = - proc(hdl: PutHdlRef; rvid: RootedVertexID; vtx: VertexRef) + proc(hdl: PutHdlRef; rvid: RootedVertexID; vtx: VertexRef, key: HashKey) {.gcsafe, raises: [].} ## Generic backend database bulk storage function, `VertexRef(nil)` ## values indicate that records should be deleted. - PutKeyFn* = - proc(hdl: PutHdlRef; rvid: RootedVertexID, key: HashKey) - {.gcsafe, raises: [].} - ## Generic backend database bulk storage function, `VOID_HASH_KEY` - ## values indicate that records should be deleted. - PutTuvFn* = proc(hdl: PutHdlRef; vs: VertexID) {.gcsafe, raises: [].} @@ -100,7 +94,6 @@ type putBegFn*: PutBegFn ## Start bulk store session putVtxFn*: PutVtxFn ## Bulk store vertex records - putKeyFn*: PutKeyFn ## Bulk store vertex hashes putTuvFn*: PutTuvFn ## Store top used vertex ID putLstFn*: PutLstFn ## Store saved state putEndFn*: PutEndFn ## Commit bulk store session @@ -115,7 +108,6 @@ proc init*(trg: var BackendObj; src: BackendObj) = trg.putBegFn = src.putBegFn trg.putVtxFn = src.putVtxFn - trg.putKeyFn = src.putKeyFn trg.putTuvFn = src.putTuvFn trg.putLstFn = src.putLstFn trg.putEndFn = src.putEndFn diff --git a/nimbus/db/aristo/aristo_desc/desc_nibbles.nim b/nimbus/db/aristo/aristo_desc/desc_nibbles.nim index 200b600a8f..beb10324df 100644 --- a/nimbus/db/aristo/aristo_desc/desc_nibbles.nim +++ b/nimbus/db/aristo/aristo_desc/desc_nibbles.nim @@ -78,9 +78,9 @@ func slice*(r: NibblesBuf, ibegin: int, iend = -1): NibblesBuf {.noinit.} = result.iend = e.int8 func replaceSuffix*(r: NibblesBuf, suffix: NibblesBuf): NibblesBuf = - for i in 0.. 0: result.isLeaf = (r[0] and 0x20) != 0 let hasOddLen = (r[0] and 0x10) != 0 - var i = 0'i8 - if hasOddLen: - result.nibbles[0] = r[0] and 0x0f - i += 1 + result.nibbles.iend = + if hasOddLen: + result.nibbles.bytes[0] = r[0] shl 4 - for j in 1 ..< r.len: - if i >= 64: - break - result.nibbles[i] = r[j] shr 4 - result.nibbles[i + 1] = r[j] and 0x0f - i += 2 + let bytes = min(31, r.len - 1) + for j in 0 ..< bytes: + result.nibbles.bytes[j] = result.nibbles.bytes[j] or r[j + 1] shr 4 + result.nibbles.bytes[j + 1] = r[j + 1] shl 4 - result.nibbles.iend = i + int8(bytes) * 2 + 1 + else: + let bytes = min(32, r.len - 1) + assign(result.nibbles.bytes.toOpenArray(0, bytes - 1), r.toOpenArray(1, bytes)) + int8(bytes) * 2 else: result.isLeaf = false + result.nibbles.iend = 0 func `&`*(a, b: NibblesBuf): NibblesBuf {.noinit.} = + result.ibegin = 0 for i in 0 ..< a.len: result[i] = a[i] diff --git a/nimbus/db/aristo/aristo_desc/desc_structural.nim b/nimbus/db/aristo/aristo_desc/desc_structural.nim index 4ddb8945a2..216f4f3684 100644 --- a/nimbus/db/aristo/aristo_desc/desc_structural.nim +++ b/nimbus/db/aristo/aristo_desc/desc_structural.nim @@ -43,7 +43,6 @@ type PayloadType* = enum ## Type of leaf data. - RawData ## Generic data AccountData ## `Aristo account` with vertex IDs links StoData ## Slot storage data @@ -58,10 +57,7 @@ type LeafPayload* = object ## The payload type depends on the sub-tree used. The `VertexID(1)` rooted ## sub-tree only has `AccountData` type payload, stoID-based have StoData - ## while generic have RawData case pType*: PayloadType - of RawData: - rawBlob*: seq[byte] ## Opaque data, default value of AccountData: account*: AristoAccount stoID*: StorageID ## Storage vertex ID (if any) @@ -157,9 +153,6 @@ proc `==`*(a, b: LeafPayload): bool = if a.pType != b.pType: return false case a.pType: - of RawData: - if a.rawBlob != b.rawBlob: - return false of AccountData: if a.account != b.account or a.stoID != b.stoID: @@ -208,10 +201,6 @@ proc `==`*(a, b: NodeRef): bool = func dup*(pld: LeafPayload): LeafPayload = ## Duplicate payload. case pld.pType: - of RawData: - LeafPayload( - pType: RawData, - rawBlob: pld.rawBlob) of AccountData: LeafPayload( pType: AccountData, diff --git a/nimbus/db/aristo/aristo_fetch.nim b/nimbus/db/aristo/aristo_fetch.nim index 968e8c18ba..8d4354b210 100644 --- a/nimbus/db/aristo/aristo_fetch.nim +++ b/nimbus/db/aristo/aristo_fetch.nim @@ -23,18 +23,6 @@ import # Private functions # ------------------------------------------------------------------------------ -func mustBeGeneric( - root: VertexID; - ): Result[void,AristoError] = - ## Verify that `root` is neither from an accounts tree nor a strorage tree. - if not root.isValid: - return err(FetchRootVidMissing) - elif root == VertexID(1): - return err(FetchAccRootNotAccepted) - elif LEAST_FREE_VID <= root.distinctBase: - return err(FetchStoRootNotAccepted) - ok() - proc retrieveLeaf( db: AristoDbRef; root: VertexID; @@ -92,34 +80,14 @@ proc retrieveAccountLeaf( proc retrieveMerkleHash( db: AristoDbRef; root: VertexID; - updateOk: bool; ): Result[Hash32,AristoError] = let key = - if updateOk: - db.computeKey((root, root)).valueOr: - if error == GetVtxNotFound: - return ok(EMPTY_ROOT_HASH) - return err(error) - else: - let (key, _) = db.getKeyRc((root, root)).valueOr: - if error == GetKeyNotFound: - return ok(EMPTY_ROOT_HASH) # empty sub-tree - return err(error) - key - ok key.to(Hash32) - - -proc hasPayload( - db: AristoDbRef; - root: VertexID; - path: openArray[byte]; - ): Result[bool,AristoError] = - let error = db.retrieveLeaf(root, path).errorOr: - return ok(true) + db.computeKey((root, root)).valueOr: + if error == GetVtxNotFound: + return ok(EMPTY_ROOT_HASH) + return err(error) - if error == FetchPathNotFound: - return ok(false) - err(error) + ok key.to(Hash32) proc hasAccountPayload( db: AristoDbRef; @@ -244,12 +212,11 @@ proc fetchAccountRecord*( ok leafVtx.lData.account -proc fetchAccountStateRoot*( +proc fetchStateRoot*( db: AristoDbRef; - updateOk: bool; ): Result[Hash32,AristoError] = ## Fetch the Merkle hash of the account root. - db.retrieveMerkleHash(VertexID(1), updateOk) + db.retrieveMerkleHash(VertexID(1)) proc hasPathAccount*( db: AristoDbRef; @@ -260,38 +227,6 @@ proc hasPathAccount*( ## db.hasAccountPayload(accPath) -proc fetchGenericData*( - db: AristoDbRef; - root: VertexID; - path: openArray[byte]; - ): Result[seq[byte],AristoError] = - ## For a generic sub-tree starting at `root`, fetch the data record - ## indexed by `path`. - ## - ? root.mustBeGeneric() - let pyl = ? db.retrieveLeaf(root, path) - assert pyl.lData.pType == RawData # debugging only - ok pyl.lData.rawBlob - -proc fetchGenericState*( - db: AristoDbRef; - root: VertexID; - updateOk: bool; - ): Result[Hash32,AristoError] = - ## Fetch the Merkle hash of the argument `root`. - db.retrieveMerkleHash(root, updateOk) - -proc hasPathGeneric*( - db: AristoDbRef; - root: VertexID; - path: openArray[byte]; - ): Result[bool,AristoError] = - ## For a generic sub-tree starting at `root` and indexed by `path`, query - ## whether this record exists on the database. - ## - ? root.mustBeGeneric() - db.hasPayload(root, path) - proc fetchStorageData*( db: AristoDbRef; accPath: Hash32; @@ -305,14 +240,13 @@ proc fetchStorageData*( proc fetchStorageRoot*( db: AristoDbRef; accPath: Hash32; - updateOk: bool; ): Result[Hash32,AristoError] = ## Fetch the Merkle hash of the storage root related to `accPath`. let stoID = db.fetchStorageIdImpl(accPath).valueOr: if error == FetchPathNotFound: return ok(EMPTY_ROOT_HASH) # no sub-tree return err(error) - db.retrieveMerkleHash(stoID, updateOk) + db.retrieveMerkleHash(stoID) proc hasPathStorage*( db: AristoDbRef; diff --git a/nimbus/db/aristo/aristo_init/memory_db.nim b/nimbus/db/aristo/aristo_init/memory_db.nim index c1350e5e18..c3aa46d6e4 100644 --- a/nimbus/db/aristo/aristo_init/memory_db.nim +++ b/nimbus/db/aristo/aristo_init/memory_db.nim @@ -44,7 +44,6 @@ type MemDbRef = ref object ## Database sTab: Table[RootedVertexID,seq[byte]] ## Structural vertex table making up a trie - kMap: Table[RootedVertexID,HashKey] ## Merkle hash key mapping tUvi: Option[VertexID] ## Top used vertex ID lSst: Opt[SavedState] ## Last saved state @@ -54,7 +53,6 @@ type MemPutHdlRef = ref object of TypedPutHdlRef sTab: Table[RootedVertexID,seq[byte]] - kMap: Table[RootedVertexID,HashKey] tUvi: Option[VertexID] lSst: Opt[SavedState] @@ -100,9 +98,12 @@ proc getVtxFn(db: MemBackendRef): GetVtxFn = proc getKeyFn(db: MemBackendRef): GetKeyFn = result = proc(rvid: RootedVertexID): Result[HashKey,AristoError] = - let key = db.mdb.kMap.getOrVoid rvid - if key.isValid: - return ok key + let data = db.mdb.sTab.getOrDefault(rvid, EmptyBlob) + if 0 < data.len: + let key = data.deblobify(HashKey).valueOr: + return err(GetKeyNotFound) + if key.isValid: + return ok(key) err(GetKeyNotFound) proc getTuvFn(db: MemBackendRef): GetTuvFn = @@ -129,21 +130,14 @@ proc putBegFn(db: MemBackendRef): PutBegFn = proc putVtxFn(db: MemBackendRef): PutVtxFn = result = - proc(hdl: PutHdlRef; rvid: RootedVertexID; vtx: VertexRef) = + proc(hdl: PutHdlRef; rvid: RootedVertexID; vtx: VertexRef, key: HashKey) = let hdl = hdl.getSession db if hdl.error.isNil: if vtx.isValid: - hdl.sTab[rvid] = vtx.blobify() + hdl.sTab[rvid] = vtx.blobify(key) else: hdl.sTab[rvid] = EmptyBlob -proc putKeyFn(db: MemBackendRef): PutKeyFn = - result = - proc(hdl: PutHdlRef; rvid: RootedVertexID, key: HashKey) = - let hdl = hdl.getSession db - if hdl.error.isNil: - hdl.kMap[rvid] = key - proc putTuvFn(db: MemBackendRef): PutTuvFn = result = proc(hdl: PutHdlRef; vs: VertexID) = @@ -186,12 +180,6 @@ proc putEndFn(db: MemBackendRef): PutEndFn = else: db.mdb.sTab.del vid - for (vid,key) in hdl.kMap.pairs: - if key.isValid: - db.mdb.kMap[vid] = key - else: - db.mdb.kMap.del vid - let tuv = hdl.tUvi.get(otherwise = VertexID(0)) if tuv.isValid: db.mdb.tUvi = some(tuv) @@ -224,7 +212,6 @@ proc memoryBackend*(): BackendRef = db.putBegFn = putBegFn db db.putVtxFn = putVtxFn db - db.putKeyFn = putKeyFn db db.putTuvFn = putTuvFn db db.putLstFn = putLstFn db db.putEndFn = putEndFn db @@ -262,11 +249,15 @@ iterator walkKey*( be: MemBackendRef; ): tuple[rvid: RootedVertexID, key: HashKey] = ## Iteration over the Markle hash sub-table. - for rvid in be.mdb.kMap.keys.toSeq.mapIt(it).sorted: - let key = be.mdb.kMap.getOrVoid(rvid) - if key.isValid: - yield (rvid, key) - + for n,rvid in be.mdb.sTab.keys.toSeq.mapIt(it).sorted: + let data = be.mdb.sTab.getOrDefault(rvid, EmptyBlob) + if 0 < data.len: + let rc = data.deblobify HashKey + if rc.isNone: + when extraTraceMessages: + debug logTxt "walkKeyFn() skip", n, rvid + else: + yield (rvid, rc.value) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/db/aristo/aristo_init/rocks_db.nim b/nimbus/db/aristo/aristo_init/rocks_db.nim index 26647acb37..a25b05936e 100644 --- a/nimbus/db/aristo/aristo_init/rocks_db.nim +++ b/nimbus/db/aristo/aristo_init/rocks_db.nim @@ -143,28 +143,16 @@ proc putBegFn(db: RdbBackendRef): PutBegFn = proc putVtxFn(db: RdbBackendRef): PutVtxFn = result = - proc(hdl: PutHdlRef; rvid: RootedVertexID; vtx: VertexRef) = + proc(hdl: PutHdlRef; rvid: RootedVertexID; vtx: VertexRef, key: HashKey) = let hdl = hdl.getSession db if hdl.error.isNil: - db.rdb.putVtx(rvid, vtx).isOkOr: + db.rdb.putVtx(rvid, vtx, key).isOkOr: hdl.error = TypedPutHdlErrRef( pfx: VtxPfx, vid: error[0], code: error[1], info: error[2]) -proc putKeyFn(db: RdbBackendRef): PutKeyFn = - result = - proc(hdl: PutHdlRef; rvid: RootedVertexID, key: HashKey) = - let hdl = hdl.getSession db - if hdl.error.isNil: - db.rdb.putKey(rvid, key).isOkOr: - hdl.error = TypedPutHdlErrRef( - pfx: KeyPfx, - vid: error[0], - code: error[1], - info: error[2]) - proc putTuvFn(db: RdbBackendRef): PutTuvFn = result = proc(hdl: PutHdlRef; vs: VertexID) = @@ -273,7 +261,6 @@ proc rocksDbBackend*( db.putBegFn = putBegFn db db.putVtxFn = putVtxFn db - db.putKeyFn = putKeyFn db db.putTuvFn = putTuvFn db db.putLstFn = putLstFn db db.putEndFn = putEndFn db @@ -320,9 +307,7 @@ iterator walkKey*( ): tuple[rvid: RootedVertexID, key: HashKey] = ## Variant of `walk()` iteration over the Markle hash sub-table. for (rvid, data) in be.rdb.walkKey: - let lid = HashKey.fromBytes(data).valueOr: - continue - yield (rvid, lid) + yield (rvid, data) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/db/aristo/aristo_init/rocks_db/rdb_desc.nim b/nimbus/db/aristo/aristo_init/rocks_db/rdb_desc.nim index 4eaa1879d2..d1cf03e75e 100644 --- a/nimbus/db/aristo/aristo_init/rocks_db/rdb_desc.nim +++ b/nimbus/db/aristo/aristo_init/rocks_db/rdb_desc.nim @@ -40,7 +40,6 @@ type RdbInst* = object admCol*: ColFamilyReadWrite ## Admin column family handler vtxCol*: ColFamilyReadWrite ## Vertex column family handler - keyCol*: ColFamilyReadWrite ## Hash key column family handler session*: WriteBatchRef ## For batched `put()` # Note that the key type `VertexID` for LRU caches requires that there is @@ -68,7 +67,6 @@ type ## Column family symbols/handles and names used on the database AdmCF = "AriAdm" ## Admin column family name VtxCF = "AriVtx" ## Vertex column family name - KeyCF = "AriKey" ## Hash key column family name RdbLruCounter* = array[bool, Atomic[uint64]] diff --git a/nimbus/db/aristo/aristo_init/rocks_db/rdb_get.nim b/nimbus/db/aristo/aristo_init/rocks_db/rdb_get.nim index 119bb27564..aa0914d427 100644 --- a/nimbus/db/aristo/aristo_init/rocks_db/rdb_get.nim +++ b/nimbus/db/aristo/aristo_init/rocks_db/rdb_get.nim @@ -20,7 +20,6 @@ import ../../[aristo_blobify, aristo_desc], ../init_common, ./rdb_desc, - metrics, std/concurrency/atomics const @@ -34,49 +33,53 @@ when extraTraceMessages: logScope: topics = "aristo-rocksdb" -type - RdbVtxLruCounter = ref object of Counter - RdbKeyLruCounter = ref object of Counter - -var - rdbVtxLruStatsMetric {.used.} = RdbVtxLruCounter.newCollector( - "aristo_rdb_vtx_lru_total", - "Vertex LRU lookup (hit/miss, world/account, branch/leaf)", - labels = ["state", "vtype", "hit"], - ) - rdbKeyLruStatsMetric {.used.} = RdbKeyLruCounter.newCollector( - "aristo_rdb_key_lru_total", "HashKey LRU lookup", labels = ["state", "hit"] - ) - -method collect*(collector: RdbVtxLruCounter, output: MetricHandler) = - let timestamp = collector.now() - - # We don't care about synchronization between each type of metric or between - # the metrics thread and others since small differences like this don't matter - for state in RdbStateType: - for vtype in VertexType: +when defined(metrics): + import + metrics + + type + RdbVtxLruCounter = ref object of Counter + RdbKeyLruCounter = ref object of Counter + + var + rdbVtxLruStatsMetric {.used.} = RdbVtxLruCounter.newCollector( + "aristo_rdb_vtx_lru_total", + "Vertex LRU lookup (hit/miss, world/account, branch/leaf)", + labels = ["state", "vtype", "hit"], + ) + rdbKeyLruStatsMetric {.used.} = RdbKeyLruCounter.newCollector( + "aristo_rdb_key_lru_total", "HashKey LRU lookup", labels = ["state", "hit"] + ) + + method collect*(collector: RdbVtxLruCounter, output: MetricHandler) = + let timestamp = collector.now() + + # We don't care about synchronization between each type of metric or between + # the metrics thread and others since small differences like this don't matter + for state in RdbStateType: + for vtype in VertexType: + for hit in [false, true]: + output( + name = "aristo_rdb_vtx_lru_total", + value = float64(rdbVtxLruStats[state][vtype].get(hit)), + labels = ["state", "vtype", "hit"], + labelValues = [$state, $vtype, $ord(hit)], + timestamp = timestamp, + ) + + method collect*(collector: RdbKeyLruCounter, output: MetricHandler) = + let timestamp = collector.now() + + for state in RdbStateType: for hit in [false, true]: output( - name = "aristo_rdb_vtx_lru_total", - value = float64(rdbVtxLruStats[state][vtype].get(hit)), - labels = ["state", "vtype", "hit"], - labelValues = [$state, $vtype, $ord(hit)], + name = "aristo_rdb_key_lru_total", + value = float64(rdbKeyLruStats[state].get(hit)), + labels = ["state", "hit"], + labelValues = [$state, $ord(hit)], timestamp = timestamp, ) -method collect*(collector: RdbKeyLruCounter, output: MetricHandler) = - let timestamp = collector.now() - - for state in RdbStateType: - for hit in [false, true]: - output( - name = "aristo_rdb_key_lru_total", - value = float64(rdbKeyLruStats[state].get(hit)), - labels = ["state", "hit"], - labelValues = [$state, $ord(hit)], - timestamp = timestamp, - ) - # ------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ @@ -103,7 +106,7 @@ proc getKey*( ): Result[HashKey,(AristoError,string)] = # Try LRU cache first var rc = rdb.rdKeyLru.get(rvid.vid) - if rc.isOK: + if rc.isOk: rdbKeyLruStats[rvid.to(RdbStateType)].inc(true) return ok(move(rc.value)) @@ -113,19 +116,17 @@ proc getKey*( # A threadvar is used to avoid allocating an environment for onData var res{.threadvar.}: Opt[HashKey] let onData = proc(data: openArray[byte]) = - res = HashKey.fromBytes(data) + res = data.deblobify(HashKey) - let gotData = rdb.keyCol.get(rvid.blobify().data(), onData).valueOr: + let gotData = rdb.vtxCol.get(rvid.blobify().data(), onData).valueOr: const errSym = RdbBeDriverGetKeyError when extraTraceMessages: trace logTxt "getKey", rvid, error=errSym, info=error return err((errSym,error)) # Correct result if needed - if not gotData: + if not gotData or res.isNone(): res.ok(VOID_HASH_KEY) - elif res.isErr(): - return err((RdbHashKeyExpected,"")) # Parsing failed # Update cache and return rdb.rdKeyLru.put(rvid.vid, res.value()) @@ -144,7 +145,7 @@ proc getVtx*( else: rdb.rdVtxLru.get(rvid.vid) - if rc.isOK: + if rc.isOk: rdbVtxLruStats[rvid.to(RdbStateType)][rc.value().vType].inc(true) return ok(move(rc.value)) diff --git a/nimbus/db/aristo/aristo_init/rocks_db/rdb_init.nim b/nimbus/db/aristo/aristo_init/rocks_db/rdb_init.nim index b944ce3c73..588f8cff5a 100644 --- a/nimbus/db/aristo/aristo_init/rocks_db/rdb_init.nim +++ b/nimbus/db/aristo/aristo_init/rocks_db/rdb_init.nim @@ -140,8 +140,6 @@ proc initImpl( raiseAssert initFailed & " cannot initialise AdmCF descriptor: " & error rdb.vtxCol = baseDb.getColFamily($VtxCF).valueOr: raiseAssert initFailed & " cannot initialise VtxCF descriptor: " & error - rdb.keyCol = baseDb.getColFamily($KeyCF).valueOr: - raiseAssert initFailed & " cannot initialise KeyCF descriptor: " & error ok(guestCFs.mapIt(baseDb.getColFamily(it.name).expect("loaded cf"))) diff --git a/nimbus/db/aristo/aristo_init/rocks_db/rdb_put.nim b/nimbus/db/aristo/aristo_init/rocks_db/rdb_put.nim index 1bcef46e24..95168a4880 100644 --- a/nimbus/db/aristo/aristo_init/rocks_db/rdb_put.nim +++ b/nimbus/db/aristo/aristo_init/rocks_db/rdb_put.nim @@ -84,45 +84,13 @@ proc putAdm*( return err((xid,errSym,error)) ok() -proc putKey*( - rdb: var RdbInst; - rvid: RootedVertexID, key: HashKey; - ): Result[void,(VertexID,AristoError,string)] = - let dsc = rdb.session - if key.isValid: - dsc.put(rvid.blobify().data(), key.data, rdb.keyCol.handle()).isOkOr: - # Caller must `rollback()` which will flush the `rdKeyLru` cache - const errSym = RdbBeDriverPutKeyError - when extraTraceMessages: - trace logTxt "putKey()", vid, error=errSym, info=error - return err((rvid.vid,errSym,error)) - - # Update existing cached items but don't add new ones since doing so is - # likely to evict more useful items (when putting many items, we might even - # evict those that were just added) - discard rdb.rdKeyLru.update(rvid.vid, key) - - else: - dsc.delete(rvid.blobify().data(), rdb.keyCol.handle()).isOkOr: - # Caller must `rollback()` which will flush the `rdKeyLru` cache - const errSym = RdbBeDriverDelKeyError - when extraTraceMessages: - trace logTxt "putKey()", vid, error=errSym, info=error - return err((rvid.vid,errSym,error)) - - # Update cache, vertex will most probably never be visited anymore - rdb.rdKeyLru.del rvid.vid - - ok() - - proc putVtx*( rdb: var RdbInst; - rvid: RootedVertexID; vtx: VertexRef + rvid: RootedVertexID; vtx: VertexRef, key: HashKey ): Result[void,(VertexID,AristoError,string)] = let dsc = rdb.session if vtx.isValid: - dsc.put(rvid.blobify().data(), vtx.blobify(), rdb.vtxCol.handle()).isOkOr: + dsc.put(rvid.blobify().data(), vtx.blobify(key), rdb.vtxCol.handle()).isOkOr: # Caller must `rollback()` which will flush the `rdVtxLru` cache const errSym = RdbBeDriverPutVtxError when extraTraceMessages: @@ -133,6 +101,10 @@ proc putVtx*( # likely to evict more useful items (when putting many items, we might even # evict those that were just added) discard rdb.rdVtxLru.update(rvid.vid, vtx) + if key.isValid: + discard rdb.rdKeyLru.update(rvid.vid, key) + else: + rdb.rdKeyLru.del rvid.vid else: dsc.delete(rvid.blobify().data(), rdb.vtxCol.handle()).isOkOr: @@ -144,6 +116,7 @@ proc putVtx*( # Update cache, vertex will most probably never be visited anymore rdb.rdVtxLru.del rvid.vid + rdb.rdKeyLru.del rvid.vid ok() diff --git a/nimbus/db/aristo/aristo_init/rocks_db/rdb_walk.nim b/nimbus/db/aristo/aristo_init/rocks_db/rdb_walk.nim index a3d6299600..24551a0927 100644 --- a/nimbus/db/aristo/aristo_init/rocks_db/rdb_walk.nim +++ b/nimbus/db/aristo/aristo_init/rocks_db/rdb_walk.nim @@ -52,25 +52,43 @@ iterator walkAdm*(rdb: RdbInst): tuple[xid: uint64, data: seq[byte]] = if key.len == 8 and val.len != 0: yield (uint64.fromBytesBE key, val) -iterator walkKey*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: seq[byte]] = +iterator walkKey*(rdb: RdbInst): tuple[rvid: RootedVertexID, data: HashKey] = ## Walk over key-value pairs of the hash key column of the database. ## ## Non-decodable entries are are ignored. ## block walkBody: - let rit = rdb.keyCol.openIterator().valueOr: + let rit = rdb.vtxCol.openIterator().valueOr: when extraTraceMessages: - trace logTxt "walkKey()", error + trace logTxt "walkVtx()", error break walkBody defer: rit.close() - for (key,val) in rit.pairs: - if val.len != 0: - let rvid = key.deblobify(RootedVertexID).valueOr: - continue + rit.seekToFirst() + var key: RootedVertexID + var value: HashKey + var valid: bool - yield (rvid, val) + proc readKey(data: openArray[byte]) = + key = deblobify(data, RootedVertexID).valueOr: + valid = false + default(RootedVertexID) + proc readValue(data: openArray[byte]) = + value = deblobify(data, HashKey).valueOr: + valid = false + default(HashKey) + + while rit.isValid(): + valid = true + rit.value(readValue) + + if valid: + rit.key(readKey) + if valid: + yield (key, value) + + rit.next() iterator walkVtx*( rdb: RdbInst, kinds: set[VertexType]): tuple[rvid: RootedVertexID, data: VertexRef] = @@ -118,7 +136,6 @@ iterator walkVtx*( yield (key, value) rit.next() - rit.close() # ------------------------------------------------------------------------------ # End diff --git a/nimbus/db/aristo/aristo_layers.nim b/nimbus/db/aristo/aristo_layers.nim index 0097f6ef53..bc80bee6c9 100644 --- a/nimbus/db/aristo/aristo_layers.nim +++ b/nimbus/db/aristo/aristo_layers.nim @@ -81,9 +81,14 @@ func layersGetKey*(db: AristoDbRef; rvid: RootedVertexID): Opt[(HashKey, int)] = db.top.kMap.withValue(rvid, item): return Opt.some((item[], 0)) + if rvid in db.top.sTab: + return Opt.some((VOID_HASH_KEY, 0)) + for i, w in enumerate(db.rstack): w.kMap.withValue(rvid, item): return ok((item[], i + 1)) + if rvid in w.sTab: + return Opt.some((VOID_HASH_KEY, i + 1)) Opt.none((HashKey, int)) @@ -122,6 +127,7 @@ func layersPutVtx*( ) = ## Store a (potentally empty) vertex on the top layer db.top.sTab[rvid] = vtx + db.top.kMap.del(rvid) func layersResVtx*( db: AristoDbRef; @@ -135,30 +141,22 @@ func layersResVtx*( func layersPutKey*( db: AristoDbRef; rvid: RootedVertexID; + vtx: VertexRef, key: HashKey; ) = ## Store a (potentally void) hash key on the top layer + db.top.sTab[rvid] = vtx db.top.kMap[rvid] = key - -func layersResKey*(db: AristoDbRef; rvid: RootedVertexID) = +func layersResKey*(db: AristoDbRef; rvid: RootedVertexID, vtx: VertexRef) = ## Shortcut for `db.layersPutKey(vid, VOID_HASH_KEY)`. It is sort of the ## equivalent of a delete function. - db.layersPutKey(rvid, VOID_HASH_KEY) + db.layersPutVtx(rvid, vtx) func layersResKeys*(db: AristoDbRef; hike: Hike) = ## Reset all cached keys along the given hike for i in 1..hike.legs.len: - db.layersResKey((hike.root, hike.legs[^i].wp.vid)) - -proc layersUpdateVtx*( - db: AristoDbRef; # Database, top layer - rvid: RootedVertexID; - vtx: VertexRef; # Vertex to add - ) = - ## Update a vertex at `rvid` and reset its associated key entry - db.layersPutVtx(rvid, vtx) - db.layersResKey(rvid) + db.layersResKey((hike.root, hike.legs[^i].wp.vid), hike.legs[^i].wp.vtx) func layersPutAccLeaf*(db: AristoDbRef; accPath: Hash32; leafVtx: VertexRef) = db.top.accLeaves[accPath] = leafVtx @@ -187,6 +185,7 @@ func layersMergeOnto*(src: LayerRef; trg: var LayerObj) = for (vid,vtx) in src.sTab.pairs: trg.sTab[vid] = vtx + trg.kMap.del vid for (vid,key) in src.kMap.pairs: trg.kMap[vid] = key trg.vTop = src.vTop @@ -215,6 +214,7 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef = for n in 1 ..< layers.len: for (vid,vtx) in layers[n].sTab.pairs: result.sTab[vid] = vtx + result.kMap.del vid for (vid,key) in layers[n].kMap.pairs: result.kMap[vid] = key for (accPath,vtx) in layers[n].accLeaves.pairs: diff --git a/nimbus/db/aristo/aristo_merge.nim b/nimbus/db/aristo/aristo_merge.nim index fe91f45e0c..5a80fc049f 100644 --- a/nimbus/db/aristo/aristo_merge.nim +++ b/nimbus/db/aristo/aristo_merge.nim @@ -60,16 +60,18 @@ proc mergePayloadImpl( # We're at the root vertex and there is no data - this must be a fresh # VertexID! return ok (db.layersPutLeaf((root, cur), path, payload), nil, nil) - steps: ArrayBuf[NibblesBuf.high + 1, VertexID] + vids: ArrayBuf[NibblesBuf.high + 1, VertexID] + vtxs: ArrayBuf[NibblesBuf.high + 1, VertexRef] template resetKeys() = # Reset cached hashes of touched verticies - for i in 1..steps.len: - db.layersResKey((root, steps[^i])) + for i in 2..vids.len: + db.layersResKey((root, vids[^i]), vtxs[^i]) while path.len > 0: # Clear existing merkle keys along the traversal path - steps.add cur + vids.add cur + vtxs.add vtx let n = path.sharedPrefixLen(vtx.pfx) case vtx.vType @@ -202,37 +204,6 @@ proc mergeAccountRecord*( ok true -proc mergeGenericData*( - db: AristoDbRef; # Database, top layer - root: VertexID; # MPT state root - path: openArray[byte]; # Leaf item to add to the database - data: openArray[byte]; # Raw data payload value - ): Result[bool,AristoError] = - ## Variant of `mergeXXX()` for generic sub-trees, i.e. for arguments - ## `root` greater than `VertexID(1)` and smaller than `LEAST_FREE_VID`. - ## - ## On success, the function returns `true` if the `data` argument was merged - ## into the database ot updated, and `false` if it was on the database - ## already. - ## - # Verify that `root` is neither an accounts tree nor a strorage tree. - if not root.isValid: - return err(MergeRootVidMissing) - elif root == VertexID(1): - return err(MergeAccRootNotAccepted) - elif LEAST_FREE_VID <= root.distinctBase: - return err(MergeStoRootNotAccepted) - - let - pyl = LeafPayload(pType: RawData, rawBlob: @data) - - discard db.mergePayloadImpl(root, path, Opt.none(VertexRef), pyl).valueOr: - if error == MergeNoAction: - return ok false - return err error - - ok true - proc mergeStorageData*( db: AristoDbRef; # Database, top layer accPath: Hash32; # Needed for accounts payload diff --git a/nimbus/db/aristo/aristo_nearby.nim b/nimbus/db/aristo/aristo_nearby.nim index 31defcec3b..cfc850e522 100644 --- a/nimbus/db/aristo/aristo_nearby.nim +++ b/nimbus/db/aristo/aristo_nearby.nim @@ -21,7 +21,7 @@ {.push raises: [].} import - std/[tables, typetraits], + std/[tables], eth/common, results, "."/[aristo_desc, aristo_fetch, aristo_get, aristo_hike, aristo_path] @@ -65,7 +65,7 @@ proc branchNibbleMax*(vtx: VertexRef; maxInx: int8): int8 = ## Find the greatest index for an argument branch `vtx` link with index ## less or equal the argument `nibble`. if vtx.vType == Branch: - for n in maxInx.countDown 0: + for n in maxInx.countdown 0: if vtx.bVid[n].isValid: return n -1 @@ -400,7 +400,7 @@ iterator rightPairs*( var hike: Hike discard start.hikeUp(db, Opt.none(VertexRef), hike) var rc = hike.right db - while rc.isOK: + while rc.isOk: hike = rc.value let (key, pyl) = hike.toLeafTiePayload yield (key, pyl) @@ -439,17 +439,6 @@ iterator rightPairsAccount*( for (lty,pyl) in db.rightPairs LeafTie(root: VertexID(1), path: start): yield (lty.path, pyl.account) -iterator rightPairsGeneric*( - db: AristoDbRef; # Database layer - root: VertexID; # Generic root (different from VertexID) - start = low(PathID); # Before or at first value - ): (PathID,seq[byte]) = - ## Variant of `rightPairs()` for a generic tree - # Verify that `root` is neither from an accounts tree nor a strorage tree. - if VertexID(1) < root and root.distinctBase < LEAST_FREE_VID: - for (lty,pyl) in db.rightPairs LeafTie(root: VertexID(1), path: start): - yield (lty.path, pyl.rawBlob) - iterator rightPairsStorage*( db: AristoDbRef; # Database layer accPath: Hash32; # Account the storage data belong to @@ -497,7 +486,7 @@ iterator leftPairs*( discard start.hikeUp(db, Opt.none(VertexRef), hike) var rc = hike.left db - while rc.isOK: + while rc.isOk: hike = rc.value let (key, pyl) = hike.toLeafTiePayload yield (key, pyl) diff --git a/nimbus/db/aristo/aristo_part.nim b/nimbus/db/aristo/aristo_part.nim index 02913aaddb..202bec2a64 100644 --- a/nimbus/db/aristo/aristo_part.nim +++ b/nimbus/db/aristo/aristo_part.nim @@ -366,37 +366,6 @@ proc partReRoot*( # Public merge functions on partial tree database # ------------------------------------------------------------------------------ -proc partMergeGenericData*( - ps: PartStateRef; - root: VertexID; # MPT state root - path: openArray[byte]; # Leaf item to add to the database - data: openArray[byte]; # Raw data payload value - ): Result[bool,AristoError] = - ## .. - let mergeError = block: - # Opportunistically try whether it just works - let rc = ps.db.mergeGenericData(root, path, data) - if rc.isOk or rc.error != GetVtxNotFound: - return rc - rc.error - - # Otherwise clean the way removing blind link and retry - let - ctx = ps.ctxMergeBegin(root, path).valueOr: - let ctxErr = if error == PartCtxNotAvailable: mergeError else: error - return err(ctxErr) - rc = ps.db.mergeGenericData(root, path, data) - - # Evaluate result => commit/rollback - if rc.isErr: - ? ctx.ctxMergeRollback() - return rc - if not ? ctx.ctxMergeCommit(): - return err(PartVtxSlotWasNotModified) - - ok(rc.value) - - proc partMergeAccountRecord*( ps: PartStateRef; accPath: Hash32; # Even nibbled byte path diff --git a/nimbus/db/aristo/aristo_part/part_chain_rlp.nim b/nimbus/db/aristo/aristo_part/part_chain_rlp.nim index 05760d55f2..4bd4acb053 100644 --- a/nimbus/db/aristo/aristo_part/part_chain_rlp.nim +++ b/nimbus/db/aristo/aristo_part/part_chain_rlp.nim @@ -13,7 +13,7 @@ import eth/common, results, - ".."/[aristo_desc, aristo_get, aristo_utils, aristo_compute, aristo_serialise] + ".."/[aristo_desc, aristo_get, aristo_utils, aristo_serialise] const ChainRlpNodesNoEntry* = { @@ -37,7 +37,6 @@ proc chainRlpNodes*( ): Result[void,AristoError] = ## Inspired by the `getBranchAux()` function from `hexary.nim` let - key = ? db.computeKey rvid (vtx,_) = ? db.getVtxRc rvid node = vtx.toNode(rvid.root, db).valueOr: return err(PartChnNodeConvError) diff --git a/nimbus/db/aristo/aristo_part/part_helpers.nim b/nimbus/db/aristo/aristo_part/part_helpers.nim index 70b67f6963..7f85ca4141 100644 --- a/nimbus/db/aristo/aristo_part/part_helpers.nim +++ b/nimbus/db/aristo/aristo_part/part_helpers.nim @@ -64,14 +64,15 @@ proc read(rlp: var Rlp; T: type PrfNode): T {.gcsafe, raises: [RlpError].} = let (isLeaf, pathSegment) = NibblesBuf.fromHexPrefix blobs[0] if isLeaf: return PrfNode( - prfType: ignore, - - vtx: VertexRef( - vType: Leaf, - pfx: pathSegment, - lData: LeafPayload( - pType: RawData, - rawBlob: blobs[1]))) + prfType: ignore, ) + + # TODO interpret the blob (?) + # vtx: VertexRef( + # vType: Leaf, + # pfx: pathSegment, + # lData: LeafPayload( + # pType: RawData, + # rawBlob: blobs[1]))) else: var node = PrfNode( prfType: isExtension, @@ -145,7 +146,9 @@ func toNodesTab*( # Decode payload to deficated format for storage or accounts var pyl: PrfPayload try: - pyl = rlp.decode(nd.vtx.lData.rawBlob, PrfPayload) + # TODO interpret the blob + # pyl = rlp.decode(nd.vtx.lData.rawBlob, PrfPayload) + pyl = PrfPayload(prfType: isError, error: PartRlpPayloadException) except RlpError: pyl = PrfPayload(prfType: isError, error: PartRlpPayloadException) diff --git a/nimbus/db/aristo/aristo_profile.nim b/nimbus/db/aristo/aristo_profile.nim index 8d2851c325..76bae05212 100644 --- a/nimbus/db/aristo/aristo_profile.nim +++ b/nimbus/db/aristo/aristo_profile.nim @@ -47,7 +47,7 @@ func toFloat(ela: Duration): float = ## Convert the argument `ela` to a floating point seconds result. let elaS = ela.inSeconds - elaNs = (ela - initDuration(seconds=elaS)).inNanoSeconds + elaNs = (ela - initDuration(seconds=elaS)).inNanoseconds elaS.float + elaNs.float / 1_000_000_000 proc updateTotal(t: AristoDbProfListRef; fnInx: uint) = @@ -107,12 +107,12 @@ func toStr*(elapsed: Duration): string = result = elapsed.ppMins elif 0 < times.inSeconds(elapsed): result = elapsed.ppSecs - elif 0 < times.inMilliSeconds(elapsed): + elif 0 < times.inMilliseconds(elapsed): result = elapsed.ppMs - elif 0 < times.inMicroSeconds(elapsed): + elif 0 < times.inMicroseconds(elapsed): result = elapsed.ppUs else: - result = $elapsed.inNanoSeconds & "ns" + result = $elapsed.inNanoseconds & "ns" except ValueError: result = $elapsed diff --git a/nimbus/db/aristo/aristo_serialise.nim b/nimbus/db/aristo/aristo_serialise.nim index 5d2c620939..a89d6ccc9f 100644 --- a/nimbus/db/aristo/aristo_serialise.nim +++ b/nimbus/db/aristo/aristo_serialise.nim @@ -34,8 +34,6 @@ proc serialise( ## of account type, otherwise pass the data as is. ## case pyl.pType: - of RawData: - ok pyl.rawBlob of AccountData: let key = block: if pyl.stoID.isValid: diff --git a/nimbus/db/aristo/aristo_tx/tx_frame.nim b/nimbus/db/aristo/aristo_tx/tx_frame.nim index 744b3d44d4..b3e5260313 100644 --- a/nimbus/db/aristo/aristo_tx/tx_frame.nim +++ b/nimbus/db/aristo/aristo_tx/tx_frame.nim @@ -178,7 +178,7 @@ iterator txFrameWalk*(tx: AristoTxRef): (int,AristoTxRef,LayerRef,AristoError) = yield (0,tx,db.top,AristoError(0)) # Walk down the transaction stack - for level in (tx.level-1).countDown(1): + for level in (tx.level-1).countdown(1): tx = tx.parent if tx.isNil or tx.level != level: yield (-1,tx,LayerRef(nil),TxStackGarbled) diff --git a/nimbus/db/aristo/aristo_utils.nim b/nimbus/db/aristo/aristo_utils.nim index 75328f2045..b30f728265 100644 --- a/nimbus/db/aristo/aristo_utils.nim +++ b/nimbus/db/aristo/aristo_utils.nim @@ -16,7 +16,7 @@ import eth/common, results, - "."/[aristo_constants, aristo_desc, aristo_get, aristo_layers] + "."/[aristo_desc, aristo_compute] # ------------------------------------------------------------------------------ # Public functions, converters @@ -26,8 +26,6 @@ proc toNode*( vtx: VertexRef; # Vertex to convert root: VertexID; # Sub-tree root the `vtx` belongs to db: AristoDbRef; # Database - stopEarly = true; # Full list of missing links if `false` - beKeyOk = true; # Allow fetching DB backend keys ): Result[NodeRef,seq[VertexID]] = ## Convert argument the vertex `vtx` to a node type. Missing Merkle hash ## keys are searched for on the argument database `db`. @@ -40,19 +38,6 @@ proc toNode*( ## only from the cache layer. This does not affect a link key for a payload ## storage root. ## - proc getKey(db: AristoDbRef; rvid: RootedVertexID; beOk: bool): HashKey = - block body: - let key = db.layersGetKey(rvid).valueOr: - break body - if key[0].isValid: - return key[0] - else: - return VOID_HASH_KEY - if beOk: - let rc = db.getKeyBE rvid - if rc.isOk: - return rc.value[0] - VOID_HASH_KEY case vtx.vType: of Leaf: @@ -61,30 +46,22 @@ proc toNode*( if vtx.lData.pType == AccountData: let stoID = vtx.lData.stoID if stoID.isValid: - let key = db.getKey (stoID.vid, stoID.vid) - if not key.isValid: + let key = db.computeKey((stoID.vid, stoID.vid)).valueOr: return err(@[stoID.vid]) + node.key[0] = key return ok node of Branch: let node = NodeRef(vtx: vtx.dup()) - var missing: seq[VertexID] for n in 0 .. 15: let vid = vtx.bVid[n] if vid.isValid: - let key = db.getKey((root, vid), beOk=beKeyOk) - if key.isValid: - node.key[n] = key - elif stopEarly: + let key = db.computeKey((root, vid)).valueOr: return err(@[vid]) - else: - missing.add vid - if 0 < missing.len: - return err(missing) + node.key[n] = key return ok node - iterator subVids*(vtx: VertexRef): VertexID = ## Returns the list of all sub-vertex IDs for the argument `vtx`. case vtx.vType: diff --git a/nimbus/db/core_db/backend/aristo_rocksdb.nim b/nimbus/db/core_db/backend/aristo_rocksdb.nim index 56b73f38e4..9bf7b02753 100644 --- a/nimbus/db/core_db/backend/aristo_rocksdb.nim +++ b/nimbus/db/core_db/backend/aristo_rocksdb.nim @@ -118,7 +118,7 @@ proc toRocksDb*( # Reduce number of files when the database grows cfOpts.targetFileSizeBase = cfOpts.writeBufferSize - cfOpts.targetFileSizeMultiplier = 8 + cfOpts.targetFileSizeMultiplier = 6 let dbOpts = defaultDbOptions(autoClose = true) dbOpts.maxOpenFiles = opts.maxOpenFiles diff --git a/nimbus/db/core_db/backend/aristo_trace.nim b/nimbus/db/core_db/backend/aristo_trace.nim index 9c23e5bb7c..ad3bb9a954 100644 --- a/nimbus/db/core_db/backend/aristo_trace.nim +++ b/nimbus/db/core_db/backend/aristo_trace.nim @@ -35,7 +35,6 @@ type TrpOops = 0 TrpKvt TrpAccounts - TrpGeneric TrpStorage TraceRequest* = enum @@ -176,8 +175,6 @@ when CoreDbNoisyCaptJournal: $$(key.toOpenArray(0, key.len - 1)) of TrpAccounts: "1:" & $$(key.toOpenArray(0, key.len - 1)) - of TrpGeneric: - $key[0] & ":" & $$(key.toOpenArray(1, key.len - 1)) of TrpStorage: "1:" & $$(key.toOpenArray(0, min(31, key.len - 1))) & ":" & (if 32 < key.len: $$(key.toOpenArray(32, key.len - 1)) else: "") @@ -233,21 +230,6 @@ proc jLogger( ) = tr.jLogger(EmptyBlob, ti) -proc jLogger( - tr: TraceRecorderRef; - root: VertexID; - path: openArray[byte]; - ti: TraceDataItemRef; - ) = - tr.jLogger(@[root.byte] & @path, ti) - -proc jLogger( - tr: TraceRecorderRef; - root: VertexID; - ti: TraceDataItemRef; - ) = - tr.jLogger(@[root.byte], ti) - proc jLogger( tr: TraceRecorderRef; accPath: Hash32; @@ -261,16 +243,10 @@ proc jLogger( func to(w: AristoApiProfNames; T: type TracePfx): T = case w: of AristoApiProfFetchAccountRecordFn, - AristoApiProfFetchAccountStateRootFn, + AristoApiProfFetchStateRootFn, AristoApiProfDeleteAccountRecordFn, AristoApiProfMergeAccountRecordFn: return TrpAccounts - of AristoApiProfFetchGenericDataFn, - AristoApiProfFetchGenericStateFn, - AristoApiProfDeleteGenericDataFn, - AristoApiProfDeleteGenericTreeFn, - AristoApiProfMergeGenericDataFn: - return TrpGeneric of AristoApiProfFetchStorageDataFn, AristoApiProfFetchStorageRootFn, AristoApiProfDeleteStorageDataFn, @@ -349,7 +325,7 @@ func logRecord( func logRecord( info: AristoApiProfNames; req: TraceRequest; - sto: Uint256; + sto: UInt256; ): TraceDataItemRef = TraceDataItemRef( pfx: info.to(TracePfx), @@ -490,79 +466,32 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = debug logTxt $info, level, accPath, accRec ok accRec - tracerApi.fetchAccountStateRoot = + tracerApi.fetchStateRoot = proc(mpt: AristoDbRef; - updateOk: bool; ): Result[Hash32,AristoError] = - const info = AristoApiProfFetchAccountStateRootFn + const info = AristoApiProfFetchStateRootFn when CoreDbNoisyCaptJournal: let level = tr.topLevel() # Find entry on DB - let state = api.fetchAccountStateRoot(mpt, updateOk).valueOr: + let state = api.fetchStateRoot(mpt).valueOr: when CoreDbNoisyCaptJournal: - debug logTxt $info, level, updateOk, error + debug logTxt $info, level, error tr.jLogger logRecord(info, TrqFind, error) return err(error) tr.jLogger logRecord(info, TrqFind, state) when CoreDbNoisyCaptJournal: - debug logTxt $info, level, updateOk, state - ok state - - tracerApi.fetchGenericData = - proc(mpt: AristoDbRef; - root: VertexID; - path: openArray[byte]; - ): Result[seq[byte],AristoError] = - const info = AristoApiProfFetchGenericDataFn - - when CoreDbNoisyCaptJournal: - let level = tr.topLevel() - - # Find entry on DB - let data = api.fetchGenericData(mpt, root, path).valueOr: - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, path=($$path), error - tr.jLogger(root, path, logRecord(info, TrqFind, error)) - return err(error) - - tr.jLogger(root, path, logRecord(info, TrqFind, data)) - - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, path=($$path), data - ok data - - tracerApi.fetchGenericState = - proc(mpt: AristoDbRef; - root: VertexID; - updateOk: bool; - ): Result[Hash32,AristoError] = - const info = AristoApiProfFetchGenericStateFn - - when CoreDbNoisyCaptJournal: - let level = tr.topLevel() - - # Find entry on DB - let state = api.fetchAccountStateRoot(mpt, updateOk).valueOr: - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, updateOk, error - tr.jLogger(root, logRecord(info, TrqFind, error)) - return err(error) - - tr.jLogger(root, logRecord(info, TrqFind, state)) - - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, updateOk, state + debug logTxt $info, level, state ok state tracerApi.fetchStorageData = proc(mpt: AristoDbRef; accPath: Hash32; stoPath: Hash32; - ): Result[Uint256,AristoError] = + ): Result[UInt256,AristoError] = const info = AristoApiProfFetchStorageDataFn when CoreDbNoisyCaptJournal: @@ -584,7 +513,6 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = tracerApi.fetchStorageRoot = proc(mpt: AristoDbRef; accPath: Hash32; - updateOk: bool; ): Result[Hash32,AristoError] = const info = AristoApiProfFetchStorageRootFn @@ -592,16 +520,16 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = let level = tr.topLevel() # Find entry on DB - let state = api.fetchStorageRoot(mpt, accPath, updateOk).valueOr: + let state = api.fetchStorageRoot(mpt, accPath).valueOr: when CoreDbNoisyCaptJournal: - debug logTxt $info, level, accPath, updateOk, error + debug logTxt $info, level, accPath, error tr.jLogger(accPath, logRecord(info, TrqFind, error)) return err(error) tr.jLogger(accPath, logRecord(info, TrqFind, state)) when CoreDbNoisyCaptJournal: - debug logTxt $info, level, accPath, updateOk, state + debug logTxt $info, level, accPath, state ok state tracerApi.deleteAccountRecord = @@ -640,66 +568,6 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = debug logTxt $info, level, accPath ok() - tracerApi.deleteGenericData = - proc(mpt: AristoDbRef; - root: VertexID; - path: openArray[byte]; - ): Result[bool,AristoError] = - const info = AristoApiProfDeleteGenericDataFn - - when CoreDbNoisyCaptJournal: - let level = tr.topLevel() - - # Find entry on DB (for comprehensive log record) - let tiRec = block: - let rc = api.fetchGenericData(mpt, root, path) - if rc.isOk: - logRecord(info, TrqDelete, rc.value) - elif rc.error == FetchPathNotFound: - logRecord(info, TrqDelete) - else: - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, path=($$path), error=rc.error - tr.jLogger(root, path, logRecord(info, TrqDelete, rc.error)) - return err(rc.error) - - # Delete from DB - let emptyTrie = api.deleteGenericData(mpt, root, path).valueOr: - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, path=($$path), error - tr.jLogger(root, path, logRecord(info, TrqDelete, error)) - return err(error) - - # Log on journal - tr.jLogger(root, path, tiRec) - - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, path=($$path), emptyTrie - ok emptyTrie - - tracerApi.deleteGenericTree = - proc(mpt: AristoDbRef; - root: VertexID; - ): Result[void,AristoError] = - const info = AristoApiProfDeleteGenericTreeFn - - when CoreDbNoisyCaptJournal: - let level = tr.topLevel() - - # Delete from DB - api.deleteGenericTree(mpt, root).isOkOr: - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, error - tr.jLogger(root, logRecord(info, TrqDelete, error)) - return err(error) - - # Log on journal - tr.jLogger(root, logRecord(info, TrqDelete)) - - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root - ok() - tracerApi.deleteStorageData = proc(mpt: AristoDbRef; accPath: Hash32; @@ -792,40 +660,6 @@ proc ariTraceRecorder(tr: TraceRecorderRef) = debug logTxt $info, level, accPath, accRec, hadPath, updated ok updated - tracerApi.mergeGenericData = - proc(mpt: AristoDbRef; - root: VertexID; - path: openArray[byte]; - data: openArray[byte]; - ): Result[bool,AristoError] = - const info = AristoApiProfMergeGenericDataFn - - when CoreDbNoisyCaptJournal: - let level = tr.topLevel() - - # Find entry on DB (for comprehensive log record) - let - hadPath = api.hasPathGeneric(mpt, root, path).valueOr: - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, path, error - tr.jLogger(root, path, logRecord(info, TrqAdd, error)) - return err(error) - mode = if hadPath: TrqModify else: TrqAdd - - # Do the merge - let updated = api.mergeGenericData(mpt, root, path, data).valueOr: - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, path, error - tr.jLogger(root, path, logRecord(info, mode, error)) - return err(error) - - # Log on journal - tr.jLogger(root, path, logRecord(info, mode, data)) - - when CoreDbNoisyCaptJournal: - debug logTxt $info, level, root, path, data=($$data), hadPath, updated - ok updated - tracerApi.mergeStorageData = proc(mpt: AristoDbRef; accPath: Hash32; @@ -909,8 +743,6 @@ iterator ariLog*(log: TraceLogInstRef): (VertexID,seq[byte],TraceDataItemRef) = case pfx: of TrpAccounts,TrpStorage: (VertexID(1), p.key[1..^1]) - of TrpGeneric: - (VertexID(p.key[1]), p.key[2..^1]) else: continue yield (root, key, p.data) diff --git a/nimbus/db/core_db/base.nim b/nimbus/db/core_db/base.nim index 78f28d6f3d..672cba1177 100644 --- a/nimbus/db/core_db/base.nim +++ b/nimbus/db/core_db/base.nim @@ -26,7 +26,6 @@ export CoreDbErrorCode, CoreDbError, CoreDbKvtRef, - CoreDbMptRef, CoreDbPersistentTypes, CoreDbRef, CoreDbTxRef, @@ -208,7 +207,7 @@ proc stateBlockNumber*(db: CoreDbRef): BlockNumber = db.ifTrackNewApi: debug logTxt, api, elapsed, result proc verify*( - db: CoreDbRef | CoreDbMptRef | CoreDbAccRef; + db: CoreDbRef | CoreDbAccRef; proof: openArray[seq[byte]]; root: Hash32; path: openArray[byte]; @@ -239,7 +238,7 @@ proc verify*( mpt.ifTrackNewApi: debug logTxt, api, elapsed, result proc verifyOk*( - db: CoreDbRef | CoreDbMptRef | CoreDbAccRef; + db: CoreDbRef | CoreDbAccRef; proof: openArray[seq[byte]]; root: Hash32; path: openArray[byte]; @@ -263,7 +262,7 @@ proc verifyOk*( mpt.ifTrackNewApi: debug logTxt, api, elapsed, result proc verify*( - db: CoreDbRef | CoreDbMptRef | CoreDbAccRef; + db: CoreDbRef | CoreDbAccRef; proof: openArray[seq[byte]]; root: Hash32; path: Hash32; @@ -284,7 +283,7 @@ proc verify*( mpt.ifTrackNewApi: debug logTxt, api, elapsed, result proc verifyOk*( - db: CoreDbRef | CoreDbMptRef | CoreDbAccRef; + db: CoreDbRef | CoreDbAccRef; proof: openArray[seq[byte]]; root: Hash32; path: Hash32; @@ -410,130 +409,6 @@ proc hasKey*(kvt: CoreDbKvtRef; key: openArray[byte]): bool = result = kvt.call(hasKeyRc, kvt.kvt, key).valueOr: false kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result -# ------------------------------------------------------------------------------ -# Public functions for generic columns -# ------------------------------------------------------------------------------ - -proc getGeneric*( - ctx: CoreDbCtxRef; - clearData = false; - ): CoreDbMptRef = - ## Get a generic MPT, viewed as column - ## - ctx.setTrackNewApi CtxGetGenericFn - result = CoreDbMptRef(ctx) - if clearData: - result.call(deleteGenericTree, ctx.mpt, CoreDbVidGeneric).isOkOr: - raiseAssert $api & ": " & $error - ctx.ifTrackNewApi: debug logTxt, api, clearData, elapsed - -# ----------- generic MPT --------------- - -proc proof*( - mpt: CoreDbMptRef; - key: openArray[byte]; - ): CoreDbRc[(seq[seq[byte]],bool)] = - ## On the generic MPT, collect the nodes along the `key` interpreted as - ## path. Return these path nodes as a chain of rlp-encoded blobs followed - ## by a bool value which is `true` if the `key` path exists in the database, - ## and `false` otherwise. In the latter case, the chain of rlp-encoded blobs - ## are the nodes proving that the `key` path does not exist. - ## - mpt.setTrackNewApi MptProofFn - result = block: - let rc = mpt.call(partGenericTwig, mpt.mpt, CoreDbVidGeneric, key) - if rc.isOk: - ok(rc.value) - else: - err(rc.error.toError($api, ProofCreate)) - mpt.ifTrackNewApi: debug logTxt, api, elapsed, result - -proc fetch*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[seq[byte]] = - ## Fetch data from the argument `mpt`. The function always returns a - ## non-empty `seq[byte]` or an error code. - ## - mpt.setTrackNewApi MptFetchFn - result = block: - let rc = mpt.call(fetchGenericData, mpt.mpt, CoreDbVidGeneric, key) - if rc.isOk: - ok(rc.value) - elif rc.error == FetchPathNotFound: - err(rc.error.toError($api, MptNotFound)) - else: - err(rc.error.toError $api) - mpt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result - -proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[seq[byte]] = - ## This function returns an empty `seq[byte]` if the argument `key` is not found - ## on the database. - ## - mpt.setTrackNewApi MptFetchOrEmptyFn - result = block: - let rc = mpt.call(fetchGenericData, mpt.mpt, CoreDbVidGeneric, key) - if rc.isOk: - ok(rc.value) - elif rc.error == FetchPathNotFound: - CoreDbRc[seq[byte]].ok(EmptyBlob) - else: - err(rc.error.toError $api) - mpt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result - -proc delete*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[void] = - mpt.setTrackNewApi MptDeleteFn - result = block: - let rc = mpt.call(deleteGenericData, mpt.mpt,CoreDbVidGeneric, key) - if rc.isOk: - ok() - elif rc.error == DelPathNotFound: - err(rc.error.toError($api, MptNotFound)) - else: - err(rc.error.toError $api) - mpt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result - -proc merge*( - mpt: CoreDbMptRef; - key: openArray[byte]; - val: openArray[byte]; - ): CoreDbRc[void] = - mpt.setTrackNewApi MptMergeFn - result = block: - let rc = mpt.call(mergeGenericData, mpt.mpt,CoreDbVidGeneric, key, val) - if rc.isOk: - ok() - else: - err(rc.error.toError $api) - mpt.ifTrackNewApi: - debug logTxt, api, elapsed, key=key.toStr, val=val.toLenStr, result - -proc hasPath*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[bool] = - ## This function would be named `contains()` if it returned `bool` rather - ## than a `Result[]`. - ## - mpt.setTrackNewApi MptHasPathFn - result = block: - let rc = mpt.call(hasPathGeneric, mpt.mpt, CoreDbVidGeneric, key) - if rc.isOk: - ok(rc.value) - else: - err(rc.error.toError $api) - mpt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result - -proc state*(mpt: CoreDbMptRef; updateOk = false): CoreDbRc[Hash32] = - ## This function retrieves the Merkle state hash of the argument - ## database column (if acvailable.) - ## - ## If the argument `updateOk` is set `true`, the Merkle hashes of the - ## database will be updated first (if needed, at all). - ## - mpt.setTrackNewApi MptStateFn - result = block: - let rc = mpt.call(fetchGenericState, mpt.mpt, CoreDbVidGeneric, updateOk) - if rc.isOk: - ok(rc.value) - else: - err(rc.error.toError $api) - mpt.ifTrackNewApi: debug logTxt, api, elapsed, updateOK, result - # ------------------------------------------------------------------------------ # Public methods for accounts # ------------------------------------------------------------------------------ @@ -655,21 +530,17 @@ proc hasPath*( acc.ifTrackNewApi: debug logTxt, api, elapsed, accPath=($$accPath), result -proc stateRoot*(acc: CoreDbAccRef; updateOk = false): CoreDbRc[Hash32] = +proc getStateRoot*(acc: CoreDbAccRef): CoreDbRc[Hash32] = ## This function retrieves the Merkle state hash of the accounts ## column (if available.) - ## - ## If the argument `updateOk` is set `true`, the Merkle hashes of the - ## database will be updated first (if needed, at all). - ## acc.setTrackNewApi AccStateFn result = block: - let rc = acc.call(fetchAccountStateRoot, acc.mpt, updateOk) + let rc = acc.call(fetchStateRoot, acc.mpt) if rc.isOk: ok(rc.value) else: err(rc.error.toError $api) - acc.ifTrackNewApi: debug logTxt, api, elapsed, updateOK, result + acc.ifTrackNewApi: debug logTxt, api, elapsed, result # ------------ storage --------------- @@ -772,36 +643,32 @@ proc slotMerge*( debug logTxt, api, elapsed, accPath=($$accPath), stoPath=($$stoPath), stoData, result -proc slotState*( +proc slotStorageRoot*( acc: CoreDbAccRef; accPath: Hash32; - updateOk = false; ): CoreDbRc[Hash32] = ## This function retrieves the Merkle state hash of the storage data ## column (if available) related to the account indexed by the key ## `accPath`.`. ## - ## If the argument `updateOk` is set `true`, the Merkle hashes of the - ## database will be updated first (if needed, at all). - ## - acc.setTrackNewApi AccSlotStateFn + acc.setTrackNewApi AccSlotStorageRootFn result = block: - let rc = acc.call(fetchStorageRoot, acc.mpt, accPath, updateOk) + let rc = acc.call(fetchStorageRoot, acc.mpt, accPath) if rc.isOk: ok(rc.value) else: err(rc.error.toError $api) acc.ifTrackNewApi: - debug logTxt, api, elapsed, accPath=($$accPath), updateOk, result + debug logTxt, api, elapsed, accPath=($$accPath), result -proc slotStateEmpty*( +proc slotStorageEmpty*( acc: CoreDbAccRef; accPath: Hash32; ): CoreDbRc[bool] = ## This function returns `true` if the storage data column is empty or ## missing. ## - acc.setTrackNewApi AccSlotStateEmptyFn + acc.setTrackNewApi AccSlotStorageEmptyFn result = block: let rc = acc.call(hasStorageData, acc.mpt, accPath) if rc.isOk: @@ -811,12 +678,12 @@ proc slotStateEmpty*( acc.ifTrackNewApi: debug logTxt, api, elapsed, accPath=($$accPath), result -proc slotStateEmptyOrVoid*( +proc slotStorageEmptyOrVoid*( acc: CoreDbAccRef; accPath: Hash32; ): bool = - ## Convenience wrapper, returns `true` where `slotStateEmpty()` would fail. - acc.setTrackNewApi AccSlotStateEmptyOrVoidFn + ## Convenience wrapper, returns `true` where `slotStorageEmpty()` would fail. + acc.setTrackNewApi AccSlotStorageEmptyOrVoidFn result = block: let rc = acc.call(hasStorageData, acc.mpt, accPath) if rc.isOk: @@ -832,14 +699,13 @@ proc recast*( acc: CoreDbAccRef; accPath: Hash32; accRec: CoreDbAccount; - updateOk = false; ): CoreDbRc[Account] = ## Complete the argument `accRec` to the portable Ethereum representation ## of an account statement. This conversion may fail if the storage colState - ## hash (see `slotState()` above) is currently unavailable. + ## hash (see `slotStorageRoot()` above) is currently unavailable. ## acc.setTrackNewApi AccRecastFn - let rc = acc.call(fetchStorageRoot, acc.mpt, accPath, updateOk) + let rc = acc.call(fetchStorageRoot, acc.mpt, accPath) result = block: if rc.isOk: ok Account( @@ -850,8 +716,8 @@ proc recast*( else: err(rc.error.toError $api) acc.ifTrackNewApi: - let slotState = if rc.isOk: $$(rc.value) else: "n/a" - debug logTxt, api, elapsed, accPath=($$accPath), slotState, result + let storageRoot = if rc.isOk: $$(rc.value) else: "n/a" + debug logTxt, api, elapsed, accPath=($$accPath), storageRoot, result # ------------------------------------------------------------------------------ # Public transaction related methods diff --git a/nimbus/db/core_db/base/api_tracking.nim b/nimbus/db/core_db/base/api_tracking.nim index e333757228..9c3c8e42fc 100644 --- a/nimbus/db/core_db/base/api_tracking.nim +++ b/nimbus/db/core_db/base/api_tracking.nim @@ -23,7 +23,7 @@ type ## Needed for local `$` as it would be ambiguous for `Duration` CoreDbApiTrackRef* = - CoreDbRef | CoreDbKvtRef | CoreDbCtxRef | CoreDbMptRef | CoreDbAccRef | + CoreDbRef | CoreDbKvtRef | CoreDbCtxRef | CoreDbAccRef | CoreDbTxRef CoreDbFnInx* = enum @@ -45,9 +45,9 @@ type AccSlotHasPathFn = "slotHasPath" AccSlotMergeFn = "slotMerge" AccSlotProofFn = "slotProof" - AccSlotStateFn = "slotState" - AccSlotStateEmptyFn = "slotStateEmpty" - AccSlotStateEmptyOrVoidFn = "slotStateEmptyOrVoid" + AccSlotStorageRootFn = "slotStorageRoot" + AccSlotStorageEmptyFn = "slotStorageEmpty" + AccSlotStorageEmptyOrVoidFn = "slotStorageEmptyOrVoid" AccSlotPairsIt = "slotPairs" BaseFinishFn = "finish" @@ -79,16 +79,6 @@ type KvtPairsIt = "pairs" KvtPutFn = "put" - MptDeleteFn = "mpt/delete" - MptFetchFn = "mpt/fetch" - MptFetchOrEmptyFn = "mpt/fetchOrEmpty" - MptForgetFn = "mpt/forget" - MptHasPathFn = "mpt/hasPath" - MptMergeFn = "mpt/merge" - MptProofFn = "mpt/proof" - MptPairsIt = "mpt/pairs" - MptStateFn = "mpt/state" - TxCommitFn = "commit" TxDisposeFn = "dispose" TxLevelFn = "level" @@ -152,7 +142,6 @@ func toStr(rc: CoreDbRc[CoreDbRef]): string = rc.toStr "db" func toStr(rc: CoreDbRc[CoreDbKvtRef]): string = rc.toStr "kvt" func toStr(rc: CoreDbRc[CoreDbTxRef]): string = rc.toStr "tx" func toStr(rc: CoreDbRc[CoreDbCtxRef]): string = rc.toStr "ctx" -func toStr(rc: CoreDbRc[CoreDbMptRef]): string = rc.toStr "mpt" func toStr(rc: CoreDbRc[CoreDbAccRef]): string = rc.toStr "acc" # ------------------------------------------------------------------------------ diff --git a/nimbus/db/core_db/base/base_desc.nim b/nimbus/db/core_db/base/base_desc.nim index 63ba6ddb81..f730dc3e1a 100644 --- a/nimbus/db/core_db/base/base_desc.nim +++ b/nimbus/db/core_db/base/base_desc.nim @@ -26,9 +26,6 @@ const CoreDbPersistentTypes* = {AristoDbRocks} ## List of persistent DB types (currently only a single one) - CoreDbVidGeneric* = VertexID(2) - ## Generic `MPT` root vertex ID for calculating Merkle hashes - type CoreDbProfListRef* = AristoDbProfListRef ## Borrowed from `aristo_profile`, only used in profiling mode @@ -82,7 +79,7 @@ type tracerHook*: RootRef ## Debugging/tracing CoreDbCtxRef* = ref object - ## Shared context for `CoreDbMptRef`, `CoreDbAccRef`, `CoreDbKvtRef` + ## Shared context for `CoreDbAccRef`, `CoreDbKvtRef` parent*: CoreDbRef mpt*: AristoDbRef ## `Aristo` database kvt*: KvtDbRef ## `KVT` key-value table @@ -93,9 +90,6 @@ type CoreDbAccRef* = distinct CoreDbCtxRef ## Similar to `CoreDbKvtRef`, only dealing with `Aristo` accounts - CoreDbMptRef* = distinct CoreDbCtxRef - ## Generic MPT - CoreDbTxRef* = ref object ## Transaction descriptor ctx*: CoreDbCtxRef ## Context (also contains `Aristo` descriptor) diff --git a/nimbus/db/core_db/base/base_helpers.nim b/nimbus/db/core_db/base/base_helpers.nim index 1b7a0aef41..01b12e8e62 100644 --- a/nimbus/db/core_db/base/base_helpers.nim +++ b/nimbus/db/core_db/base/base_helpers.nim @@ -40,7 +40,7 @@ proc bless*(db: CoreDbRef; ctx: CoreDbCtxRef): CoreDbCtxRef = ctx.validate ctx -proc bless*(ctx: CoreDbCtxRef; dsc: CoreDbMptRef | CoreDbTxRef): auto = +proc bless*(ctx: CoreDbCtxRef; dsc: CoreDbTxRef): auto = dsc.ctx = ctx when CoreDbAutoValidateDescriptors: dsc.validate @@ -58,13 +58,13 @@ template ctx*(kvt: CoreDbKvtRef): CoreDbCtxRef = # --------------- -template call*(api: KvtApiRef; fn: untyped; args: varArgs[untyped]): untyped = +template call*(api: KvtApiRef; fn: untyped; args: varargs[untyped]): untyped = when CoreDbEnableApiJumpTable: api.fn(args) else: fn(args) -template call*(kvt: CoreDbKvtRef; fn: untyped; args: varArgs[untyped]): untyped = +template call*(kvt: CoreDbKvtRef; fn: untyped; args: varargs[untyped]): untyped = CoreDbCtxRef(kvt).parent.kvtApi.call(fn, args) # --------------- @@ -80,7 +80,7 @@ func toError*(e: KvtError; s: string; error = Unspecified): CoreDbError = # Public Aristo helpers # ------------------------------------------------------------------------------ -template mpt*(dsc: CoreDbAccRef | CoreDbMptRef): AristoDbRef = +template mpt*(dsc: CoreDbAccRef): AristoDbRef = CoreDbCtxRef(dsc).mpt template mpt*(tx: CoreDbTxRef): AristoDbRef = @@ -91,16 +91,16 @@ template ctx*(acc: CoreDbAccRef): CoreDbCtxRef = # --------------- -template call*(api: AristoApiRef; fn: untyped; args: varArgs[untyped]): untyped = +template call*(api: AristoApiRef; fn: untyped; args: varargs[untyped]): untyped = when CoreDbEnableApiJumpTable: api.fn(args) else: fn(args) template call*( - acc: CoreDbAccRef | CoreDbMptRef; + acc: CoreDbAccRef; fn: untyped; - args: varArgs[untyped]; + args: varargs[untyped]; ): untyped = CoreDbCtxRef(acc).parent.ariApi.call(fn, args) diff --git a/nimbus/db/core_db/base_iterators.nim b/nimbus/db/core_db/base_iterators.nim index d4e3c168a3..ca26084457 100644 --- a/nimbus/db/core_db/base_iterators.nim +++ b/nimbus/db/core_db/base_iterators.nim @@ -40,7 +40,7 @@ when CoreDbEnableApiTracking: template valueOrApiError[U,V](rc: Result[U,V]; info: static[string]): U = rc.valueOr: raise (ref CoreDbApiError)(msg: info) -template dbType(dsc: CoreDbKvtRef | CoreDbMptRef | CoreDbAccRef): CoreDbType = +template dbType(dsc: CoreDbKvtRef | CoreDbAccRef): CoreDbType = dsc.distinctBase.parent.dbType # --------------- @@ -48,30 +48,30 @@ template dbType(dsc: CoreDbKvtRef | CoreDbMptRef | CoreDbAccRef): CoreDbType = template kvt(dsc: CoreDbKvtRef): KvtDbRef = dsc.distinctBase.kvt -template call(api: KvtApiRef; fn: untyped; args: varArgs[untyped]): untyped = +template call(api: KvtApiRef; fn: untyped; args: varargs[untyped]): untyped = when CoreDbEnableApiJumpTable: api.fn(args) else: fn(args) -template call(kvt: CoreDbKvtRef; fn: untyped; args: varArgs[untyped]): untyped = +template call(kvt: CoreDbKvtRef; fn: untyped; args: varargs[untyped]): untyped = kvt.distinctBase.parent.kvtApi.call(fn, args) # --------------- -template mpt(dsc: CoreDbAccRef | CoreDbMptRef): AristoDbRef = +template mpt(dsc: CoreDbAccRef): AristoDbRef = dsc.distinctBase.mpt -template call(api: AristoApiRef; fn: untyped; args: varArgs[untyped]): untyped = +template call(api: AristoApiRef; fn: untyped; args: varargs[untyped]): untyped = when CoreDbEnableApiJumpTable: api.fn(args) else: fn(args) template call( - acc: CoreDbAccRef | CoreDbMptRef; + acc: CoreDbAccRef; fn: untyped; - args: varArgs[untyped]; + args: varargs[untyped]; ): untyped = acc.distinctBase.parent.ariApi.call(fn, args) @@ -98,21 +98,7 @@ iterator pairs*(kvt: CoreDbKvtRef): (seq[byte], seq[byte]) {.apiRaise.} = raiseAssert: "Unsupported database type: " & $kvt.dbType kvt.ifTrackNewApi: debug logTxt, api, elapsed -iterator pairs*(mpt: CoreDbMptRef): (seq[byte], seq[byte]) = - ## Trie traversal, only supported for `CoreDbMptRef` - ## - mpt.setTrackNewApi MptPairsIt - case mpt.dbType: - of AristoDbMemory, AristoDbRocks, AristoDbVoid: - for (path,data) in mpt.mpt.rightPairsGeneric CoreDbVidGeneric: - yield (mpt.call(pathAsBlob, path), data) - of Ooops: - raiseAssert: "Unsupported database type: " & $mpt.dbType - mpt.ifTrackNewApi: debug logTxt, api, elapsed - iterator slotPairs*(acc: CoreDbAccRef; accPath: Hash32): (seq[byte], UInt256) = - ## Trie traversal, only supported for `CoreDbMptRef` - ## acc.setTrackNewApi AccSlotPairsIt case acc.dbType: of AristoDbMemory, AristoDbRocks, AristoDbVoid: diff --git a/nimbus/db/core_db/core_apps.nim b/nimbus/db/core_db/core_apps.nim index e71a833c12..7eccf7af3f 100644 --- a/nimbus/db/core_db/core_apps.nim +++ b/nimbus/db/core_db/core_apps.nim @@ -11,14 +11,15 @@ ## Rewrite of `core_apps.nim` using the new `CoreDb` API. The original ## `core_apps.nim` was renamed `core_apps_legacy.nim`. -{.push raises: [].} +{.push gcsafe, raises: [].} import - std/[algorithm, sequtils], + std/[sequtils], chronicles, eth/[common, rlp], stew/byteutils, - "../.."/[errors, constants], + results, + "../.."/[constants], ".."/[aristo, storage_types], "."/base @@ -26,9 +27,9 @@ logScope: topics = "core_db" type - TransactionKey = tuple - blockNumber: BlockNumber - index: uint + TransactionKey* = object + blockNumber*: BlockNumber + index*: uint # ------------------------------------------------------------------------------ # Forward declarations @@ -37,73 +38,35 @@ type proc getBlockHeader*( db: CoreDbRef; n: BlockNumber; - output: var Header; - ): bool - {.gcsafe.} + ): Result[Header, string] proc getBlockHeader*( db: CoreDbRef, blockHash: Hash32; - ): Header - {.gcsafe, raises: [BlockNotFound].} + ): Result[Header, string] proc getBlockHash*( db: CoreDbRef; n: BlockNumber; - output: var Hash32; - ): bool - {.gcsafe.} + ): Result[Hash32, string] proc addBlockNumberToHashLookup*( db: CoreDbRef; blockNumber: BlockNumber; blockHash: Hash32; - ) {.gcsafe.} + ) -proc getBlockHeader*( - db: CoreDbRef; - blockHash: Hash32; - output: var Header; - ): bool - {.gcsafe.} - -proc getCanonicalHeaderHash*(db: CoreDbRef): Opt[Hash32] {.gcsafe.} +proc getCanonicalHeaderHash*(db: CoreDbRef): Result[Hash32, string] # ------------------------------------------------------------------------------ # Private helpers # ------------------------------------------------------------------------------ -template discardRlpException(info: static[string]; code: untyped) = +template wrapRlpException(info: static[string]; code: untyped) = try: code except RlpError as e: - warn info, error=($e.name), err=e.msg, errName=e.name - -# ------------------------------------------------------------------------------ -# Private iterators -# ------------------------------------------------------------------------------ - -iterator findNewAncestors( - db: CoreDbRef; - header: Header; - ): Header = - ## Returns the chain leading up from the given header until the first - ## ancestor it has in common with our canonical chain. - var h = header - var orig: Header - while true: - if db.getBlockHeader(h.number, orig) and orig.rlpHash == h.rlpHash: - break - - yield h - - if h.parentHash == GENESIS_PARENT_HASH: - break - else: - if not db.getBlockHeader(h.parentHash, h): - warn "findNewAncestors(): Could not find parent while iterating", - hash = h.parentHash - break + return err(info & ": " & e.msg) # ------------------------------------------------------------------------------ # Public iterators @@ -184,118 +147,6 @@ iterator getReceipts*( break body yield rlp.decode(data, Receipt) -# ------------------------------------------------------------------------------ -# Private helpers -# ------------------------------------------------------------------------------ - -proc removeTransactionFromCanonicalChain( - db: CoreDbRef; - transactionHash: Hash32; - ) = - ## Removes the transaction specified by the given hash from the canonical - ## chain. - db.ctx.getKvt.del(transactionHashToBlockKey(transactionHash).toOpenArray).isOkOr: - warn "removeTransactionFromCanonicalChain", - transactionHash, error=($$error) - -proc setAsCanonicalChainHead( - db: CoreDbRef; - headerHash: Hash32; - header: Header; - ) = - ## Sets the header as the canonical chain HEAD. - const info = "setAsCanonicalChainHead()" - - # TODO This code handles reorgs - this should be moved elsewhere because we'll - # be handling reorgs mainly in-memory - if header.number == 0 or - db.getCanonicalHeaderHash().valueOr(default(Hash32)) != header.parentHash: - var newCanonicalHeaders = sequtils.toSeq(db.findNewAncestors(header)) - reverse(newCanonicalHeaders) - for h in newCanonicalHeaders: - var oldHash: Hash32 - if not db.getBlockHash(h.number, oldHash): - break - - try: - let oldHeader = db.getBlockHeader(oldHash) - for txHash in db.getBlockTransactionHashes(oldHeader): - db.removeTransactionFromCanonicalChain(txHash) - # TODO re-add txn to internal pending pool (only if local sender) - except BlockNotFound: - warn info & ": Could not load old header", oldHash - - for h in newCanonicalHeaders: - # TODO don't recompute block hash - db.addBlockNumberToHashLookup(h.number, h.blockHash) - - let canonicalHeadHash = canonicalHeadHashKey() - db.ctx.getKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr: - warn info, canonicalHeadHash, error=($$error) - -proc markCanonicalChain( - db: CoreDbRef; - header: Header; - headerHash: Hash32; - ): bool = - ## mark this chain as canonical by adding block number to hash lookup - ## down to forking point - const - info = "markCanonicalChain()" - var - currHash = headerHash - currHeader = header - - # mark current header as canonical - let - kvt = db.ctx.getKvt() - key = blockNumberToHashKey(currHeader.number) - kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr: - warn info, key, error=($$error) - return false - - # it is a genesis block, done - if currHeader.parentHash == default(Hash32): - return true - - # mark ancestor blocks as canonical too - currHash = currHeader.parentHash - if not db.getBlockHeader(currHeader.parentHash, currHeader): - return false - - template rlpDecodeOrZero(data: openArray[byte]): Hash32 = - try: - rlp.decode(data, Hash32) - except RlpError as exc: - warn info, key, error=exc.msg - default(Hash32) - - while currHash != default(Hash32): - let key = blockNumberToHashKey(currHeader.number) - let data = kvt.getOrEmpty(key.toOpenArray).valueOr: - warn info, key, error=($$error) - return false - if data.len == 0: - # not marked, mark it - kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr: - warn info, key, error=($$error) - elif rlpDecodeOrZero(data) != currHash: - # replace prev chain - kvt.put(key.toOpenArray, rlp.encode(currHash)).isOkOr: - warn info, key, error=($$error) - else: - # forking point, done - break - - if currHeader.parentHash == default(Hash32): - break - - currHash = currHeader.parentHash - if not db.getBlockHeader(currHeader.parentHash, currHeader): - return false - - return true - # ------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ @@ -304,132 +155,59 @@ proc getSavedStateBlockNumber*( db: CoreDbRef; ): BlockNumber = ## Returns the block number registered when the database was last time - ## updated, or `BlockNumber(0)` if there was no updata found. + ## updated, or `BlockNumber(0)` if there was no update found. ## db.stateBlockNumber() proc getBlockHeader*( db: CoreDbRef; blockHash: Hash32; - output: var Header; - ): bool = + ): Result[Header, string] = const info = "getBlockHeader()" let data = db.ctx.getKvt().get(genericHashKey(blockHash).toOpenArray).valueOr: if error.error != KvtNotFound: warn info, blockHash, error=($$error) - return false + return err("No block with hash " & $blockHash) - discardRlpException info: - output = rlp.decode(data, Header) - return true + wrapRlpException info: + return ok(rlp.decode(data, Header)) proc getBlockHeader*( - db: CoreDbRef, - blockHash: Hash32; - ): Header = - ## Returns the requested block header as specified by block hash. - ## - ## Raises BlockNotFound if it is not present in the db. - if not db.getBlockHeader(blockHash, result): - raise newException( - BlockNotFound, "No block with hash " & blockHash.data.toHex) + db: CoreDbRef; + n: BlockNumber; + ): Result[Header, string] = + ## Returns the block header with the given number in the canonical chain. + let blockHash = ?db.getBlockHash(n) + db.getBlockHeader(blockHash) proc getHash( db: CoreDbRef; key: DbKey; - ): Opt[Hash32] = + ): Result[Hash32, string] = const info = "getHash()" let data = db.ctx.getKvt().get(key.toOpenArray).valueOr: if error.error != KvtNotFound: warn info, key, error=($$error) - return Opt.none(Hash32) + return err($$error) - try: - Opt.some(rlp.decode(data, Hash32)) - except RlpError as exc: - warn info, key, error=exc.msg - Opt.none(Hash32) + wrapRlpException info: + return ok(rlp.decode(data, Hash32)) -proc getCanonicalHeaderHash*(db: CoreDbRef): Opt[Hash32] = +proc getCanonicalHeaderHash*(db: CoreDbRef): Result[Hash32, string] = db.getHash(canonicalHeadHashKey()) proc getCanonicalHead*( db: CoreDbRef; - output: var Header; - ): bool = - let headHash = db.getCanonicalHeaderHash().valueOr: - return false - discardRlpException "getCanonicalHead()": - if db.getBlockHeader(headHash, output): - return true - -proc getCanonicalHead*( - db: CoreDbRef; - ): Header - {.gcsafe, raises: [EVMError].} = - if not db.getCanonicalHead result: - raise newException( - CanonicalHeadNotFound, "No canonical head set for this chain") + ): Result[Header, string] = + let headHash = ?db.getCanonicalHeaderHash() + db.getBlockHeader(headHash) proc getBlockHash*( db: CoreDbRef; n: BlockNumber; - output: var Hash32; - ): bool = + ): Result[Hash32, string] = ## Return the block hash for the given block number. - output = db.getHash(blockNumberToHashKey(n)).valueOr: - return false - true - -proc getBlockHash*( - db: CoreDbRef; - n: BlockNumber; - ): Hash32 - {.gcsafe, raises: [BlockNotFound].} = - ## Return the block hash for the given block number. - if not db.getBlockHash(n, result): - raise newException(BlockNotFound, "No block hash for number " & $n) - -proc getHeadBlockHash*(db: CoreDbRef): Hash32 = - db.getHash(canonicalHeadHashKey()).valueOr(default(Hash32)) - -proc getBlockHeader*( - db: CoreDbRef; - n: BlockNumber; - output: var Header; - ): bool = - ## Returns the block header with the given number in the canonical chain. - var blockHash: Hash32 - if db.getBlockHash(n, blockHash): - result = db.getBlockHeader(blockHash, output) - -proc getBlockHeaderWithHash*( - db: CoreDbRef; - n: BlockNumber; - ): Opt[(Header, Hash32)] = - ## Returns the block header and its hash, with the given number in the - ## canonical chain. Hash is returned to avoid recomputing it - var hash: Hash32 - if db.getBlockHash(n, hash): - # Note: this will throw if header is not present. - var header: Header - if db.getBlockHeader(hash, header): - return Opt.some((header, hash)) - else: - # this should not happen, but if it happen lets fail laudly as this means - # something is super wrong - raiseAssert("Corrupted database. Mapping number->hash present, without header in database") - else: - return Opt.none((Header, Hash32)) - -proc getBlockHeader*( - db: CoreDbRef; - n: BlockNumber; - ): Header - {.raises: [BlockNotFound].} = - ## Returns the block header with the given number in the canonical chain. - ## Raises BlockNotFound error if the block is not in the DB. - db.getBlockHeader(db.getBlockHash(n)) + db.getHash(blockNumberToHashKey(n)) proc getScore*( db: CoreDbRef; @@ -454,11 +232,6 @@ proc setScore*(db: CoreDbRef; blockHash: Hash32, score: UInt256) = warn "setScore()", scoreKey, error=($$error) return -proc getTd*(db: CoreDbRef; blockHash: Hash32, td: var UInt256): bool = - td = db.getScore(blockHash).valueOr: - return false - true - proc headTotalDifficulty*( db: CoreDbRef; ): UInt256 = @@ -471,16 +244,16 @@ proc getAncestorsHashes*( db: CoreDbRef; limit: BlockNumber; header: Header; - ): seq[Hash32] - {.gcsafe, raises: [BlockNotFound].} = - var ancestorCount = min(header.number, limit) - var h = header - - result = newSeq[Hash32](ancestorCount) + ): Result[seq[Hash32], string] = + var + ancestorCount = min(header.number, limit) + h = header + res = newSeq[Hash32](ancestorCount) while ancestorCount > 0: - h = db.getBlockHeader(h.parentHash) - result[ancestorCount - 1] = h.rlpHash + h = ?db.getBlockHeader(h.parentHash) + res[ancestorCount - 1] = h.rlpHash dec ancestorCount + ok(res) proc addBlockNumberToHashLookup*( db: CoreDbRef; blockNumber: BlockNumber, blockHash: Hash32) = @@ -506,7 +279,7 @@ proc persistTransactions*( encodedTx = rlp.encode(tx) txHash = keccak256(encodedTx) blockKey = transactionHashToBlockKey(txHash) - txKey: TransactionKey = (blockNumber, idx.uint) + txKey = TransactionKey(blockNumber: blockNumber, index: idx.uint) key = hashIndexKey(txRoot, idx.uint16) kvt.put(key, encodedTx).isOkOr: warn info, idx, error=($$error) @@ -521,41 +294,33 @@ proc forgetHistory*( ): bool = ## Remove all data related to the block number argument `num`. This function ## returns `true`, if some history was available and deleted. - var blockHash: Hash32 - if db.getBlockHash(blockNum, blockHash): - let kvt = db.ctx.getKvt() - # delete blockNum->blockHash - discard kvt.del(blockNumberToHashKey(blockNum).toOpenArray) - result = true + let blockHash = db.getBlockHash(blockNum).valueOr: + return false - var header: Header - if db.getBlockHeader(blockHash, header): - # delete blockHash->header, stateRoot->blockNum - discard kvt.del(genericHashKey(blockHash).toOpenArray) + let kvt = db.ctx.getKvt() + # delete blockNum->blockHash + discard kvt.del(blockNumberToHashKey(blockNum).toOpenArray) + # delete blockHash->header, stateRoot->blockNum + discard kvt.del(genericHashKey(blockHash).toOpenArray) + true proc getTransactionByIndex*( db: CoreDbRef; txRoot: Hash32; txIndex: uint16; - res: var Transaction; - ): bool = + ): Result[Transaction, string] = const info = "getTransaction()" let kvt = db.ctx.getKvt() let key = hashIndexKey(txRoot, txIndex) let txData = kvt.getOrEmpty(key).valueOr: - warn info, txRoot, key, error=($$error) - return false + return err($$error) if txData.len == 0: - return false + return err("tx data is empty for root=" & $txRoot & " and index=" & $txIndex) - try: - res = rlp.decode(txData, Transaction) - except RlpError as e: - warn info, txRoot, err=e.msg, errName=e.name - return false - true + wrapRlpException info: + return ok(rlp.decode(txData, Transaction)) proc getTransactionCount*( db: CoreDbRef; @@ -581,32 +346,36 @@ proc getTransactionCount*( proc getUnclesCount*( db: CoreDbRef; ommersHash: Hash32; - ): int - {.gcsafe, raises: [RlpError].} = + ): Result[int, string] = const info = "getUnclesCount()" - if ommersHash != EMPTY_UNCLE_HASH: + if ommersHash == EMPTY_UNCLE_HASH: + return ok(0) + + wrapRlpException info: let encodedUncles = block: let key = genericHashKey(ommersHash) db.ctx.getKvt().get(key.toOpenArray).valueOr: if error.error == KvtNotFound: warn info, ommersHash, error=($$error) - return 0 - return rlpFromBytes(encodedUncles).listLen + return ok(0) + return ok(rlpFromBytes(encodedUncles).listLen) proc getUncles*( db: CoreDbRef; ommersHash: Hash32; - ): seq[Header] - {.gcsafe, raises: [RlpError].} = + ): Result[seq[Header], string] = const info = "getUncles()" - if ommersHash != EMPTY_UNCLE_HASH: + if ommersHash == EMPTY_UNCLE_HASH: + return ok(default(seq[Header])) + + wrapRlpException info: let encodedUncles = block: let key = genericHashKey(ommersHash) db.ctx.getKvt().get(key.toOpenArray).valueOr: if error.error == KvtNotFound: warn info, ommersHash, error=($$error) - return @[] - return rlp.decode(encodedUncles, seq[Header]) + return ok(default(seq[Header])) + return ok(rlp.decode(encodedUncles, seq[Header])) proc persistWithdrawals*( db: CoreDbRef; @@ -625,115 +394,103 @@ proc persistWithdrawals*( proc getWithdrawals*( db: CoreDbRef; - withdrawalsRoot: Hash32; - ): seq[Withdrawal] - {.gcsafe, raises: [RlpError].} = - for wd in db.getWithdrawals(withdrawalsRoot): - result.add(wd) + withdrawalsRoot: Hash32 + ): Result[seq[Withdrawal], string] = + wrapRlpException "getWithdrawals": + var res: seq[Withdrawal] + for wd in db.getWithdrawals(withdrawalsRoot): + res.add(wd) + return ok(res) proc getTransactions*( db: CoreDbRef; - txRoot: Hash32; - output: var seq[Transaction]) - {.gcsafe, raises: [RlpError].} = - for encodedTx in db.getBlockTransactionData(txRoot): - output.add(rlp.decode(encodedTx, Transaction)) - -proc getTransactions*( - db: CoreDbRef; - txRoot: Hash32; - ): seq[Transaction] - {.gcsafe, raises: [RlpError].} = - db.getTransactions(txRoot, result) + txRoot: Hash32 + ): Result[seq[Transaction], string] = + wrapRlpException "getTransactions": + var res: seq[Transaction] + for encodedTx in db.getBlockTransactionData(txRoot): + res.add(rlp.decode(encodedTx, Transaction)) + return ok(res) proc getBlockBody*( db: CoreDbRef; header: Header; - output: var BlockBody; - ): bool = - try: - output.transactions = db.getTransactions(header.txRoot) - output.uncles = db.getUncles(header.ommersHash) + ): Result[BlockBody, string] = + wrapRlpException "getBlockBody": + var body: BlockBody + body.transactions = ?db.getTransactions(header.txRoot) + body.uncles = ?db.getUncles(header.ommersHash) if header.withdrawalsRoot.isSome: - output.withdrawals = Opt.some(db.getWithdrawals(header.withdrawalsRoot.get)) - true - except RlpError: - false + let wds = ?db.getWithdrawals(header.withdrawalsRoot.get) + body.withdrawals = Opt.some(wds) + return ok(body) proc getBlockBody*( db: CoreDbRef; blockHash: Hash32; - output: var BlockBody; - ): bool = - var header: Header - if db.getBlockHeader(blockHash, header): - return db.getBlockBody(header, output) - -proc getBlockBody*( - db: CoreDbRef; - hash: Hash32; - ): BlockBody - {.gcsafe, raises: [BlockNotFound].} = - if not db.getBlockBody(hash, result): - raise newException(BlockNotFound, "Error when retrieving block body") + ): Result[BlockBody, string] = + let header = ?db.getBlockHeader(blockHash) + db.getBlockBody(header) proc getEthBlock*( db: CoreDbRef; hash: Hash32; - ): EthBlock - {.gcsafe, raises: [BlockNotFound].} = + ): Result[EthBlock, string] = var - header = db.getBlockHeader(hash) - blockBody = db.getBlockBody(hash) - EthBlock.init(move(header), move(blockBody)) + header = ?db.getBlockHeader(hash) + blockBody = ?db.getBlockBody(hash) + ok(EthBlock.init(move(header), move(blockBody))) proc getEthBlock*( db: CoreDbRef; blockNumber: BlockNumber; - ): EthBlock - {.gcsafe, raises: [BlockNotFound].} = + ): Result[EthBlock, string] = var - header = db.getBlockHeader(blockNumber) + header = ?db.getBlockHeader(blockNumber) headerHash = header.blockHash - blockBody = db.getBlockBody(headerHash) - EthBlock.init(move(header), move(blockBody)) + blockBody = ?db.getBlockBody(headerHash) + ok(EthBlock.init(move(header), move(blockBody))) + proc getUncleHashes*( db: CoreDbRef; blockHashes: openArray[Hash32]; - ): seq[Hash32] - {.gcsafe, raises: [BlockNotFound].} = + ): Result[seq[Hash32], string] = + var res: seq[Hash32] for blockHash in blockHashes: - result &= db.getBlockBody(blockHash).uncles.mapIt(it.rlpHash) + let body = ?db.getBlockBody(blockHash) + res &= body.uncles.mapIt(it.rlpHash) + ok(res) proc getUncleHashes*( db: CoreDbRef; header: Header; - ): seq[Hash32] - {.gcsafe, raises: [RlpError].} = + ): Result[seq[Hash32], string] = if header.ommersHash != EMPTY_UNCLE_HASH: + return ok(default(seq[Hash32])) + + wrapRlpException "getUncleHashes": let key = genericHashKey(header.ommersHash) encodedUncles = db.ctx.getKvt().get(key.toOpenArray).valueOr: if error.error == KvtNotFound: warn "getUncleHashes()", ommersHash=header.ommersHash, error=($$error) - return @[] - return rlp.decode(encodedUncles, seq[Header]).mapIt(it.rlpHash) + return ok(default(seq[Hash32])) + return ok(rlp.decode(encodedUncles, seq[Header]).mapIt(it.rlpHash)) proc getTransactionKey*( db: CoreDbRef; transactionHash: Hash32; - ): tuple[blockNumber: BlockNumber, index: uint64] - {.gcsafe, raises: [RlpError].} = - let - txKey = transactionHashToBlockKey(transactionHash) - tx = db.ctx.getKvt().get(txKey.toOpenArray).valueOr: - if error.error == KvtNotFound: - warn "getTransactionKey()", transactionHash, error=($$error) - return (0.BlockNumber, 0) - let key = rlp.decode(tx, TransactionKey) - (key.blockNumber, key.index.uint64) + ): Result[TransactionKey, string] = + wrapRlpException "getTransactionKey": + let + txKey = transactionHashToBlockKey(transactionHash) + tx = db.ctx.getKvt().get(txKey.toOpenArray).valueOr: + if error.error == KvtNotFound: + warn "getTransactionKey()", transactionHash, error=($$error) + return ok(default(TransactionKey)) + return ok(rlp.decode(tx, TransactionKey)) proc headerExists*(db: CoreDbRef; blockHash: Hash32): bool = ## Returns True if the header with the given block hash is in our DB. @@ -745,38 +502,26 @@ proc headerExists*(db: CoreDbRef; blockHash: Hash32): bool = proc setHead*( db: CoreDbRef; blockHash: Hash32; - ): bool = - var header: Header - if not db.getBlockHeader(blockHash, header): - return false - - if not db.markCanonicalChain(header, blockHash): - return false - + ): Result[void, string] = let canonicalHeadHash = canonicalHeadHashKey() db.ctx.getKvt.put(canonicalHeadHash.toOpenArray, rlp.encode(blockHash)).isOkOr: - warn "setHead()", canonicalHeadHash, error=($$error) - return true + return err($$error) + ok() proc setHead*( db: CoreDbRef; header: Header; writeHeader = false; - ): bool = - const info = "setHead()" + ): Result[void, string] = var headerHash = rlpHash(header) let kvt = db.ctx.getKvt() if writeHeader: kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header)).isOkOr: - warn info, headerHash, error=($$error) - return false - if not db.markCanonicalChain(header, headerHash): - return false + return err($$error) let canonicalHeadHash = canonicalHeadHashKey() kvt.put(canonicalHeadHash.toOpenArray, rlp.encode(headerHash)).isOkOr: - warn info, canonicalHeadHash, error=($$error) - return false - true + return err($$error) + ok() proc persistReceipts*( db: CoreDbRef; @@ -796,34 +541,33 @@ proc persistReceipts*( proc getReceipts*( db: CoreDbRef; receiptsRoot: Hash32; - ): seq[Receipt] - {.gcsafe, raises: [RlpError].} = - var receipts = newSeq[Receipt]() - for r in db.getReceipts(receiptsRoot): - receipts.add(r) - return receipts + ): Result[seq[Receipt], string] = + wrapRlpException "getReceipts": + var receipts = newSeq[Receipt]() + for r in db.getReceipts(receiptsRoot): + receipts.add(r) + return ok(receipts) proc persistScore*( db: CoreDbRef; blockHash: Hash32; score: UInt256 - ): bool = + ): Result[void, string] = const info = "persistScore" let kvt = db.ctx.getKvt() scoreKey = blockHashToScoreKey(blockHash) kvt.put(scoreKey.toOpenArray, rlp.encode(score)).isOkOr: - warn info, scoreKey, error=($$error) - return - true + return err(info & ": " & $$error) + ok() proc persistHeader*( db: CoreDbRef; blockHash: Hash32; header: Header; startOfHistory = GENESIS_PARENT_HASH; - ): bool = + ): Result[void, string] = const info = "persistHeader" let @@ -831,12 +575,10 @@ proc persistHeader*( isStartOfHistory = header.parentHash == startOfHistory if not isStartOfHistory and not db.headerExists(header.parentHash): - warn info & ": parent header missing", blockNumber=header.number - return false + return err(info & ": parent header missing number " & $header.number) kvt.put(genericHashKey(blockHash).toOpenArray, rlp.encode(header)).isOkOr: - warn info, blockHash, blockNumber=header.number, error=($$error) - return false + return err(info & ": " & $$error) let parentScore = if isStartOfHistory: @@ -845,17 +587,15 @@ proc persistHeader*( db.getScore(header.parentHash).valueOr: # TODO it's slightly wrong to fail here and leave the block in the db, # but this code is going away soon enough - return false + return err(info & ": cannot get score") score = parentScore + header.difficulty # After EIP-3675, difficulty is set to 0 but we still save the score for # each block to simplify totalDifficulty reporting # TODO get rid of this and store a single value - if not db.persistScore(blockHash, score): - return false - + ?db.persistScore(blockHash, score) db.addBlockNumberToHashLookup(header.number, blockHash) - true + ok() proc persistHeader*( db: CoreDbRef; @@ -863,32 +603,29 @@ proc persistHeader*( header: Header; forceCanonical: bool; startOfHistory = GENESIS_PARENT_HASH; - ): bool = - if not db.persistHeader(blockHash, header, startOfHistory): - return false + ): Result[void, string] = + ?db.persistHeader(blockHash, header, startOfHistory) if not forceCanonical and header.parentHash != startOfHistory: let - canonicalHash = db.getCanonicalHeaderHash().valueOr: - return false + canonicalHash = ?db.getCanonicalHeaderHash() canonScore = db.getScore(canonicalHash).valueOr: - return false + return err("cannot load canon score") # TODO no need to load score from database _really_, but this code is # hopefully going away soon score = db.getScore(blockHash).valueOr: - return false + return err("cannot load score") if score <= canonScore: - return true + return ok() - db.setAsCanonicalChainHead(blockHash, header) - true + db.setHead(blockHash) proc persistHeader*( db: CoreDbRef; header: Header; forceCanonical: bool; startOfHistory = GENESIS_PARENT_HASH; - ): bool = + ): Result[void, string] = let blockHash = header.blockHash db.persistHeader(blockHash, header, forceCanonical, startOfHistory) @@ -925,14 +662,12 @@ proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash32) = proc safeHeader*( db: CoreDbRef; - ): Header - {.gcsafe, raises: [BlockNotFound].} = + ): Result[Header, string] = db.getBlockHeader(db.safeHeaderHash) proc finalizedHeader*( db: CoreDbRef; - ): Header - {.gcsafe, raises: [BlockNotFound].} = + ): Result[Header, string] = db.getBlockHeader(db.finalizedHeaderHash) # ------------------------------------------------------------------------------ diff --git a/nimbus/db/ledger.nim b/nimbus/db/ledger.nim index cf1f140a8d..0e1f07b037 100644 --- a/nimbus/db/ledger.nim +++ b/nimbus/db/ledger.nim @@ -5,31 +5,924 @@ # http://www.apache.org/licenses/LICENSE-2.0) # * MIT license ([LICENSE-MIT](LICENSE-MIT) or # http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -## Unifies different ledger management APIs. All ledger objects are -## derived from the base objects -## :: -## LedgerSpRef => SavePoint, overloaded SavePoint etc -## +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. + {.push raises: [].} import - eth/common, - ./core_db, - ./ledger/backend/accounts_ledger, - ./ledger/base/[base_config, base_desc, base_helpers], - ./ledger/[base, base_iterators] + std/[tables, hashes, sets, typetraits], + chronicles, + eth/common/eth_types, + results, + minilru, + ../utils/mergeutils, + ../evm/code_bytes, + ../stateless/multi_keys, + "/.."/[constants, utils/utils], + ./access_list as ac_access_list, + "."/[core_db, storage_types, transient_storage], + ./aristo/aristo_blobify + +export + code_bytes + +const + debugLedgerRef = false + codeLruSize = 16*1024 + # An LRU cache of 16K items gives roughly 90% hit rate anecdotally on a + # small range of test blocks - this number could be studied in more detail + # Per EIP-170, a the code of a contract can be up to `MAX_CODE_SIZE` = 24kb, + # which would cause a worst case of 386MB memory usage though in reality + # code sizes are much smaller - it would make sense to study these numbers + # in greater detail. + slotsLruSize = 16 * 1024 + +type + AccountFlag = enum + Alive + IsNew + Dirty + Touched + CodeChanged + StorageChanged + NewlyCreated # EIP-6780: self destruct only in same transaction + + AccountFlags = set[AccountFlag] + + AccountRef = ref object + statement: CoreDbAccount + accPath: Hash32 + flags: AccountFlags + code: CodeBytesRef + originalStorage: TableRef[UInt256, UInt256] + overlayStorage: Table[UInt256, UInt256] + + WitnessData* = object + storageKeys*: HashSet[UInt256] + codeTouched*: bool + + LedgerRef* = ref object + ledger: CoreDbAccRef # AccountLedger + kvt: CoreDbKvtRef + savePoint: LedgerSpRef + witnessCache: Table[Address, WitnessData] + isDirty: bool + ripemdSpecial: bool + storeSlotHash*: bool + cache: Table[Address, AccountRef] + # Second-level cache for the ledger save point, which is cleared on every + # persist + code: LruCache[Hash32, CodeBytesRef] + ## The code cache provides two main benefits: + ## + ## * duplicate code is shared in memory beween accounts + ## * the jump destination table does not have to be recomputed for every + ## execution, for commonly called called contracts + ## + ## The former feature is specially important in the 2.3-2.7M block range + ## when underpriced code opcodes are being run en masse - both advantages + ## help performance broadly as well. + + slots: LruCache[UInt256, Hash32] + ## Because the same slots often reappear, we want to avoid writing them + ## over and over again to the database to avoid the WAL and compation + ## write amplification that ensues + + ReadOnlyStateDB* = distinct LedgerRef + + TransactionState = enum + Pending + Committed + RolledBack + + LedgerSpRef* = ref object + parentSavepoint: LedgerSpRef + cache: Table[Address, AccountRef] + dirty: Table[Address, AccountRef] + selfDestruct: HashSet[Address] + logEntries: seq[Log] + accessList: ac_access_list.AccessList + transientStorage: TransientStorage + state: TransactionState + when debugLedgerRef: + depth: int + +const + emptyEthAccount = Account.init() + + resetFlags = { + Dirty, + IsNew, + Touched, + CodeChanged, + StorageChanged, + NewlyCreated + } + +when debugLedgerRef: + import + stew/byteutils + + proc inspectSavePoint(name: string, x: LedgerSpRef) = + debugEcho "*** ", name, ": ", x.depth, " ***" + var sp = x + while sp != nil: + for address, acc in sp.cache: + debugEcho address.toHex, " ", acc.flags + sp = sp.parentSavepoint + +template logTxt(info: static[string]): static[string] = + "LedgerRef " & info + +template toAccountKey(acc: AccountRef): Hash32 = + acc.accPath + +template toAccountKey(eAddr: Address): Hash32 = + eAddr.data.keccak256 + + +proc beginSavepoint*(ac: LedgerRef): LedgerSpRef {.gcsafe.} + +proc resetCoreDbAccount(ac: LedgerRef, acc: AccountRef) = + const info = "resetCoreDbAccount(): " + ac.ledger.clearStorage(acc.toAccountKey).isOkOr: + raiseAssert info & $$error + acc.statement.nonce = emptyEthAccount.nonce + acc.statement.balance = emptyEthAccount.balance + acc.statement.codeHash = emptyEthAccount.codeHash + +proc getAccount( + ac: LedgerRef; + address: Address; + shouldCreate = true; + ): AccountRef = + + # search account from layers of cache + var sp = ac.savePoint + while sp != nil: + result = sp.cache.getOrDefault(address) + if not result.isNil: + return + sp = sp.parentSavepoint + + if ac.cache.pop(address, result): + # Check second-level cache + ac.savePoint.cache[address] = result + return + + # not found in cache, look into state trie + let + accPath = address.toAccountKey + rc = ac.ledger.fetch accPath + if rc.isOk: + result = AccountRef( + statement: rc.value, + accPath: accPath, + flags: {Alive}) + elif shouldCreate: + result = AccountRef( + statement: CoreDbAccount( + nonce: emptyEthAccount.nonce, + balance: emptyEthAccount.balance, + codeHash: emptyEthAccount.codeHash), + accPath: accPath, + flags: {Alive, IsNew}) + else: + return # ignore, don't cache + + # cache the account + ac.savePoint.cache[address] = result + ac.savePoint.dirty[address] = result + +proc clone(acc: AccountRef, cloneStorage: bool): AccountRef = + result = AccountRef( + statement: acc.statement, + accPath: acc.accPath, + flags: acc.flags, + code: acc.code) + + if cloneStorage: + result.originalStorage = acc.originalStorage + # it's ok to clone a table this way + result.overlayStorage = acc.overlayStorage + +proc isEmpty(acc: AccountRef): bool = + acc.statement.nonce == 0 and + acc.statement.balance.isZero and + acc.statement.codeHash == EMPTY_CODE_HASH + +template exists(acc: AccountRef): bool = + Alive in acc.flags + +proc originalStorageValue( + acc: AccountRef; + slot: UInt256; + ac: LedgerRef; + ): UInt256 = + # share the same original storage between multiple + # versions of account + if acc.originalStorage.isNil: + acc.originalStorage = newTable[UInt256, UInt256]() + else: + acc.originalStorage[].withValue(slot, val) do: + return val[] + + # Not in the original values cache - go to the DB. + let + slotKey = ac.slots.get(slot).valueOr: + slot.toBytesBE.keccak256 + rc = ac.ledger.slotFetch(acc.toAccountKey, slotKey) + if rc.isOk: + result = rc.value + + acc.originalStorage[slot] = result + +proc storageValue( + acc: AccountRef; + slot: UInt256; + ac: LedgerRef; + ): UInt256 = + acc.overlayStorage.withValue(slot, val) do: + return val[] + do: + result = acc.originalStorageValue(slot, ac) + +proc kill(ac: LedgerRef, acc: AccountRef) = + acc.flags.excl Alive + acc.overlayStorage.clear() + acc.originalStorage = nil + ac.resetCoreDbAccount acc + acc.code.reset() + +type + PersistMode = enum + DoNothing + Update + Remove + +proc persistMode(acc: AccountRef): PersistMode = + result = DoNothing + if Alive in acc.flags: + if IsNew in acc.flags or Dirty in acc.flags: + result = Update + else: + if IsNew notin acc.flags: + result = Remove + +proc persistCode(acc: AccountRef, ac: LedgerRef) = + if acc.code.len != 0 and not acc.code.persisted: + let rc = ac.kvt.put( + contractHashKey(acc.statement.codeHash).toOpenArray, acc.code.bytes()) + if rc.isErr: + warn logTxt "persistCode()", + codeHash=acc.statement.codeHash, error=($$rc.error) + else: + # If the ledger changes rolled back entirely from the database, the ledger + # code cache must also be cleared! + acc.code.persisted = true + +proc persistStorage(acc: AccountRef, ac: LedgerRef) = + const info = "persistStorage(): " + + if acc.overlayStorage.len == 0: + # TODO: remove the storage too if we figure out + # how to create 'virtual' storage room for each account + return + + if acc.originalStorage.isNil: + acc.originalStorage = newTable[UInt256, UInt256]() + + # Make sure that there is an account entry on the database. This is needed by + # `Aristo` for updating the account's storage area reference. As a side effect, + # this action also updates the latest statement data. + ac.ledger.merge(acc.toAccountKey, acc.statement).isOkOr: + raiseAssert info & $$error + + # Save `overlayStorage[]` on database + for slot, value in acc.overlayStorage: + acc.originalStorage[].withValue(slot, v): + if v[] == value: + continue # Avoid writing A-B-A updates + + var cached = true + let slotKey = ac.slots.get(slot).valueOr: + cached = false + let hash = slot.toBytesBE.keccak256 + ac.slots.put(slot, hash) + hash + + if value > 0: + ac.ledger.slotMerge(acc.toAccountKey, slotKey, value).isOkOr: + raiseAssert info & $$error + + # move the overlayStorage to originalStorage, related to EIP2200, EIP1283 + acc.originalStorage[slot] = value + + else: + ac.ledger.slotDelete(acc.toAccountKey, slotKey).isOkOr: + if error.error != StoNotFound: + raiseAssert info & $$error + discard + acc.originalStorage.del(slot) + + if ac.storeSlotHash and not cached: + # Write only if it was not cached to avoid writing the same data over and + # over.. + let + key = slotKey.data.slotHashToSlotKey + rc = ac.kvt.put(key.toOpenArray, blobify(slot).data) + if rc.isErr: + warn logTxt "persistStorage()", slot, error=($$rc.error) + + acc.overlayStorage.clear() + +proc makeDirty(ac: LedgerRef, address: Address, cloneStorage = true): AccountRef = + ac.isDirty = true + result = ac.getAccount(address) + if address in ac.savePoint.cache: + # it's already in latest savepoint + result.flags.incl Dirty + ac.savePoint.dirty[address] = result + return + + # put a copy into latest savepoint + result = result.clone(cloneStorage) + result.flags.incl Dirty + ac.savePoint.cache[address] = result + ac.savePoint.dirty[address] = result + +# ------------------------------------------------------------------------------ +# Public methods +# ------------------------------------------------------------------------------ + +# The LedgerRef is modeled after TrieDatabase for it's transaction style +proc init*(x: typedesc[LedgerRef], db: CoreDbRef, storeSlotHash: bool): LedgerRef = + new result + result.ledger = db.ctx.getAccounts() + result.kvt = db.ctx.getKvt() + result.witnessCache = Table[Address, WitnessData]() + result.storeSlotHash = storeSlotHash + result.code = typeof(result.code).init(codeLruSize) + result.slots = typeof(result.slots).init(slotsLruSize) + discard result.beginSavepoint + +proc init*(x: typedesc[LedgerRef], db: CoreDbRef): LedgerRef = + init(x, db, false) + +proc getStateRoot*(ac: LedgerRef): Hash32 = + # make sure all savepoint already committed + doAssert(ac.savePoint.parentSavepoint.isNil) + # make sure all cache already committed + doAssert(ac.isDirty == false) + ac.ledger.getStateRoot().expect("working database") + +proc isTopLevelClean*(ac: LedgerRef): bool = + ## Getter, returns `true` if all pending data have been commited. + not ac.isDirty and ac.savePoint.parentSavepoint.isNil + +proc beginSavepoint*(ac: LedgerRef): LedgerSpRef = + new result + result.cache = Table[Address, AccountRef]() + result.accessList.init() + result.transientStorage.init() + result.state = Pending + result.parentSavepoint = ac.savePoint + ac.savePoint = result + + when debugLedgerRef: + if not result.parentSavePoint.isNil: + result.depth = result.parentSavePoint.depth + 1 + inspectSavePoint("snapshot", result) + +proc rollback*(ac: LedgerRef, sp: LedgerSpRef) = + # Transactions should be handled in a strictly nested fashion. + # Any child transaction must be committed or rolled-back before + # its parent transactions: + doAssert ac.savePoint == sp and sp.state == Pending + ac.savePoint = sp.parentSavepoint + sp.state = RolledBack + + when debugLedgerRef: + inspectSavePoint("rollback", ac.savePoint) + +proc commit*(ac: LedgerRef, sp: LedgerSpRef) = + # Transactions should be handled in a strictly nested fashion. + # Any child transaction must be committed or rolled-back before + # its parent transactions: + doAssert ac.savePoint == sp and sp.state == Pending + # cannot commit most inner savepoint + doAssert not sp.parentSavepoint.isNil + + ac.savePoint = sp.parentSavepoint + ac.savePoint.cache.mergeAndReset(sp.cache) + ac.savePoint.dirty.mergeAndReset(sp.dirty) + ac.savePoint.transientStorage.mergeAndReset(sp.transientStorage) + ac.savePoint.accessList.mergeAndReset(sp.accessList) + ac.savePoint.selfDestruct.mergeAndReset(sp.selfDestruct) + ac.savePoint.logEntries.mergeAndReset(sp.logEntries) + sp.state = Committed + + when debugLedgerRef: + inspectSavePoint("commit", ac.savePoint) + +proc dispose*(ac: LedgerRef, sp: LedgerSpRef) = + if sp.state == Pending: + ac.rollback(sp) + +proc safeDispose*(ac: LedgerRef, sp: LedgerSpRef) = + if (not isNil(sp)) and (sp.state == Pending): + ac.rollback(sp) + +proc getCodeHash*(ac: LedgerRef, address: Address): Hash32 = + let acc = ac.getAccount(address, false) + if acc.isNil: emptyEthAccount.codeHash + else: acc.statement.codeHash + +proc getBalance*(ac: LedgerRef, address: Address): UInt256 = + let acc = ac.getAccount(address, false) + if acc.isNil: emptyEthAccount.balance + else: acc.statement.balance + +proc getNonce*(ac: LedgerRef, address: Address): AccountNonce = + let acc = ac.getAccount(address, false) + if acc.isNil: emptyEthAccount.nonce + else: acc.statement.nonce + +proc getCode*(ac: LedgerRef, address: Address): CodeBytesRef = + # Always returns non-nil! + let acc = ac.getAccount(address, false) + if acc.isNil: + return CodeBytesRef() + + if acc.code == nil: + acc.code = + if acc.statement.codeHash != EMPTY_CODE_HASH: + ac.code.get(acc.statement.codeHash).valueOr: + var rc = ac.kvt.get(contractHashKey(acc.statement.codeHash).toOpenArray) + if rc.isErr: + warn logTxt "getCode()", codeHash=acc.statement.codeHash, error=($$rc.error) + CodeBytesRef() + else: + let newCode = CodeBytesRef.init(move(rc.value), persisted = true) + ac.code.put(acc.statement.codeHash, newCode) + newCode + else: + CodeBytesRef() + + acc.code + +proc getCodeSize*(ac: LedgerRef, address: Address): int = + let acc = ac.getAccount(address, false) + if acc.isNil: + return 0 + + if acc.code == nil: + if acc.statement.codeHash == EMPTY_CODE_HASH: + return 0 + acc.code = ac.code.get(acc.statement.codeHash).valueOr: + # On a cache miss, we don't fetch the code - instead, we fetch just the + # length - should the code itself be needed, it will typically remain + # cached and easily accessible in the database layer - this is to prevent + # EXTCODESIZE calls from messing up the code cache and thus causing + # recomputation of the jump destination table + var rc = ac.kvt.len(contractHashKey(acc.statement.codeHash).toOpenArray) + + return rc.valueOr: + warn logTxt "getCodeSize()", codeHash=acc.statement.codeHash, error=($$rc.error) + 0 + + acc.code.len() + +proc getCommittedStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt256 = + let acc = ac.getAccount(address, false) + if acc.isNil: + return + acc.originalStorageValue(slot, ac) + +proc getStorage*(ac: LedgerRef, address: Address, slot: UInt256): UInt256 = + let acc = ac.getAccount(address, false) + if acc.isNil: + return + acc.storageValue(slot, ac) + +proc contractCollision*(ac: LedgerRef, address: Address): bool = + let acc = ac.getAccount(address, false) + if acc.isNil: + return + acc.statement.nonce != 0 or + acc.statement.codeHash != EMPTY_CODE_HASH or + not ac.ledger.slotStorageEmptyOrVoid(acc.toAccountKey) + +proc accountExists*(ac: LedgerRef, address: Address): bool = + let acc = ac.getAccount(address, false) + if acc.isNil: + return + acc.exists() + +proc isEmptyAccount*(ac: LedgerRef, address: Address): bool = + let acc = ac.getAccount(address, false) + doAssert not acc.isNil + doAssert acc.exists() + acc.isEmpty() + +proc isDeadAccount*(ac: LedgerRef, address: Address): bool = + let acc = ac.getAccount(address, false) + if acc.isNil: + return true + if not acc.exists(): + return true + acc.isEmpty() + +proc setBalance*(ac: LedgerRef, address: Address, balance: UInt256) = + let acc = ac.getAccount(address) + acc.flags.incl {Alive} + if acc.statement.balance != balance: + ac.makeDirty(address).statement.balance = balance + +proc addBalance*(ac: LedgerRef, address: Address, delta: UInt256) = + # EIP161: We must check emptiness for the objects such that the account + # clearing (0,0,0 objects) can take effect. + if delta.isZero: + let acc = ac.getAccount(address) + if acc.isEmpty: + ac.makeDirty(address).flags.incl Touched + return + ac.setBalance(address, ac.getBalance(address) + delta) + +proc subBalance*(ac: LedgerRef, address: Address, delta: UInt256) = + if delta.isZero: + # This zero delta early exit is important as shown in EIP-4788. + # If the account is created, it will change the state. + # But early exit will prevent the account creation. + # In this case, the SYSTEM_ADDRESS + return + ac.setBalance(address, ac.getBalance(address) - delta) + +proc setNonce*(ac: LedgerRef, address: Address, nonce: AccountNonce) = + let acc = ac.getAccount(address) + acc.flags.incl {Alive} + if acc.statement.nonce != nonce: + ac.makeDirty(address).statement.nonce = nonce + +proc incNonce*(ac: LedgerRef, address: Address) = + ac.setNonce(address, ac.getNonce(address) + 1) + +proc setCode*(ac: LedgerRef, address: Address, code: seq[byte]) = + let acc = ac.getAccount(address) + acc.flags.incl {Alive} + let codeHash = keccak256(code) + if acc.statement.codeHash != codeHash: + var acc = ac.makeDirty(address) + acc.statement.codeHash = codeHash + # Try to reuse cache entry if it exists, but don't save the code - it's not + # a given that it will be executed within LRU range + acc.code = ac.code.get(codeHash).valueOr(CodeBytesRef.init(code)) + acc.flags.incl CodeChanged + +proc setStorage*(ac: LedgerRef, address: Address, slot, value: UInt256) = + let acc = ac.getAccount(address) + acc.flags.incl {Alive} + let oldValue = acc.storageValue(slot, ac) + if oldValue != value: + var acc = ac.makeDirty(address) + acc.overlayStorage[slot] = value + acc.flags.incl StorageChanged + +proc clearStorage*(ac: LedgerRef, address: Address) = + const info = "clearStorage(): " + + # a.k.a createStateObject. If there is an existing account with + # the given address, it is overwritten. + + let acc = ac.getAccount(address) + acc.flags.incl {Alive, NewlyCreated} + + let empty = ac.ledger.slotStorageEmpty(acc.toAccountKey).valueOr: return + if not empty: + # need to clear the storage from the database first + let acc = ac.makeDirty(address, cloneStorage = false) + ac.ledger.clearStorage(acc.toAccountKey).isOkOr: + raiseAssert info & $$error + # update caches + if acc.originalStorage.isNil.not: + # also clear originalStorage cache, otherwise + # both getStorage and getCommittedStorage will + # return wrong value + acc.originalStorage.clear() + +proc deleteAccount*(ac: LedgerRef, address: Address) = + # make sure all savepoints already committed + doAssert(ac.savePoint.parentSavepoint.isNil) + let acc = ac.getAccount(address) + ac.savePoint.dirty[address] = acc + ac.kill acc + +proc selfDestruct*(ac: LedgerRef, address: Address) = + ac.setBalance(address, 0.u256) + ac.savePoint.selfDestruct.incl address + +proc selfDestruct6780*(ac: LedgerRef, address: Address) = + let acc = ac.getAccount(address, false) + if acc.isNil: + return + + if NewlyCreated in acc.flags: + ac.selfDestruct(address) + +proc selfDestructLen*(ac: LedgerRef): int = + ac.savePoint.selfDestruct.len + +proc addLogEntry*(ac: LedgerRef, log: Log) = + ac.savePoint.logEntries.add log + +proc getAndClearLogEntries*(ac: LedgerRef): seq[Log] = + swap(result, ac.savePoint.logEntries) + +proc ripemdSpecial*(ac: LedgerRef) = + ac.ripemdSpecial = true + +proc deleteEmptyAccount(ac: LedgerRef, address: Address) = + let acc = ac.getAccount(address, false) + if acc.isNil: + return + if not acc.isEmpty: + return + if not acc.exists: + return + + ac.savePoint.dirty[address] = acc + ac.kill acc + +proc clearEmptyAccounts(ac: LedgerRef) = + # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-161.md + for acc in ac.savePoint.dirty.values(): + if Touched in acc.flags and + acc.isEmpty and acc.exists: + ac.kill acc + + # https://github.com/ethereum/EIPs/issues/716 + if ac.ripemdSpecial: + ac.deleteEmptyAccount(RIPEMD_ADDR) + ac.ripemdSpecial = false + +proc persist*(ac: LedgerRef, + clearEmptyAccount: bool = false, + clearCache = false) = + const info = "persist(): " + + # make sure all savepoint already committed + doAssert(ac.savePoint.parentSavepoint.isNil) + + if clearEmptyAccount: + ac.clearEmptyAccounts() + + for address in ac.savePoint.selfDestruct: + ac.deleteAccount(address) + + for (eAddr,acc) in ac.savePoint.dirty.pairs(): # This is a hotspot in block processing + case acc.persistMode() + of Update: + if CodeChanged in acc.flags: + acc.persistCode(ac) + if StorageChanged in acc.flags: + acc.persistStorage(ac) + else: + # This one is only necessary unless `persistStorage()` is run which needs + # to `merge()` the latest statement as well. + ac.ledger.merge(acc.toAccountKey, acc.statement).isOkOr: + raiseAssert info & $$error + of Remove: + ac.ledger.delete(acc.toAccountKey).isOkOr: + if error.error != AccNotFound: + raiseAssert info & $$error + ac.savePoint.cache.del eAddr + of DoNothing: + # dead man tell no tales + # remove touched dead account from cache + if Alive notin acc.flags: + ac.savePoint.cache.del eAddr + + acc.flags = acc.flags - resetFlags + ac.savePoint.dirty.clear() + + if clearCache: + # This overwrites the cache from the previous persist, providing a crude LRU + # scheme with little overhead + # TODO https://github.com/nim-lang/Nim/issues/23759 + swap(ac.cache, ac.savePoint.cache) + ac.savePoint.cache.reset() + + ac.savePoint.selfDestruct.clear() + + # EIP2929 + ac.savePoint.accessList.clear() + + ac.isDirty = false + +iterator addresses*(ac: LedgerRef): Address = + # make sure all savepoint already committed + doAssert(ac.savePoint.parentSavepoint.isNil) + for address, _ in ac.savePoint.cache: + yield address + +iterator accounts*(ac: LedgerRef): Account = + # make sure all savepoint already committed + doAssert(ac.savePoint.parentSavepoint.isNil) + for _, acc in ac.savePoint.cache: + yield ac.ledger.recast( + acc.toAccountKey, acc.statement).value + +iterator pairs*(ac: LedgerRef): (Address, Account) = + # make sure all savepoint already committed + doAssert(ac.savePoint.parentSavepoint.isNil) + for address, acc in ac.savePoint.cache: + yield (address, ac.ledger.recast( + acc.toAccountKey, acc.statement).value) + +iterator storage*( + ac: LedgerRef; + eAddr: Address; + ): (UInt256, UInt256) = + # beware that if the account not persisted, + # the storage root will not be updated + for (slotHash, value) in ac.ledger.slotPairs eAddr.toAccountKey: + let rc = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray) + if rc.isErr: + warn logTxt "storage()", slotHash, error=($$rc.error) + continue + let r = deblobify(rc.value, UInt256) + if r.isErr: + warn logTxt "storage.deblobify", slotHash, msg=r.error + continue + yield (r.value, value) + +iterator cachedStorage*(ac: LedgerRef, address: Address): (UInt256, UInt256) = + let acc = ac.getAccount(address, false) + if not acc.isNil: + if not acc.originalStorage.isNil: + for k, v in acc.originalStorage: + yield (k, v) + +proc getStorageRoot*(ac: LedgerRef, address: Address): Hash32 = + # beware that if the account not persisted, + # the storage root will not be updated + let acc = ac.getAccount(address, false) + if acc.isNil: EMPTY_ROOT_HASH + else: ac.ledger.slotStorageRoot(acc.toAccountKey).valueOr: EMPTY_ROOT_HASH + +proc update(wd: var WitnessData, acc: AccountRef) = + # once the code is touched make sure it doesn't get reset back to false in another update + if not wd.codeTouched: + wd.codeTouched = CodeChanged in acc.flags or acc.code != nil + + if not acc.originalStorage.isNil: + for k, v in acc.originalStorage: + if v.isZero: continue + wd.storageKeys.incl k + + for k, v in acc.overlayStorage: + wd.storageKeys.incl k + +proc witnessData(acc: AccountRef): WitnessData = + result.storageKeys = HashSet[UInt256]() + update(result, acc) + +proc collectWitnessData*(ac: LedgerRef) = + # make sure all savepoint already committed + doAssert(ac.savePoint.parentSavepoint.isNil) + # usually witness data is collected before we call persist() + for address, acc in ac.savePoint.cache: + ac.witnessCache.withValue(address, val) do: + update(val[], acc) + do: + ac.witnessCache[address] = witnessData(acc) + +func multiKeys(slots: HashSet[UInt256]): MultiKeysRef = + if slots.len == 0: return + new result + for x in slots: + result.add x.toBytesBE + result.sort() + +proc makeMultiKeys*(ac: LedgerRef): MultiKeysRef = + # this proc is called after we done executing a block + new result + for k, v in ac.witnessCache: + result.add(k, v.codeTouched, multiKeys(v.storageKeys)) + result.sort() + +proc accessList*(ac: LedgerRef, address: Address) = + ac.savePoint.accessList.add(address) + +proc accessList*(ac: LedgerRef, address: Address, slot: UInt256) = + ac.savePoint.accessList.add(address, slot) + +func inAccessList*(ac: LedgerRef, address: Address): bool = + var sp = ac.savePoint + while sp != nil: + result = sp.accessList.contains(address) + if result: + return + sp = sp.parentSavepoint + +func inAccessList*(ac: LedgerRef, address: Address, slot: UInt256): bool = + var sp = ac.savePoint + while sp != nil: + result = sp.accessList.contains(address, slot) + if result: + return + sp = sp.parentSavepoint + +func getTransientStorage*(ac: LedgerRef, + address: Address, slot: UInt256): UInt256 = + var sp = ac.savePoint + while sp != nil: + let (ok, res) = sp.transientStorage.getStorage(address, slot) + if ok: + return res + sp = sp.parentSavepoint + +proc setTransientStorage*(ac: LedgerRef, + address: Address, slot, val: UInt256) = + ac.savePoint.transientStorage.setStorage(address, slot, val) + +proc clearTransientStorage*(ac: LedgerRef) = + # make sure all savepoint already committed + doAssert(ac.savePoint.parentSavepoint.isNil) + ac.savePoint.transientStorage.clear() + +func getAccessList*(ac: LedgerRef): common.AccessList = + # make sure all savepoint already committed + doAssert(ac.savePoint.parentSavepoint.isNil) + ac.savePoint.accessList.getAccessList() + +proc getEthAccount*(ac: LedgerRef, address: Address): Account = + let acc = ac.getAccount(address, false) + if acc.isNil: + return emptyEthAccount + + ## Convert to legacy object, will throw an assert if that fails + let rc = ac.ledger.recast(acc.toAccountKey, acc.statement) + if rc.isErr: + raiseAssert "getAccount(): cannot convert account: " & $$rc.error + rc.value + +proc getAccountProof*(ac: LedgerRef, address: Address): seq[seq[byte]] = + let accProof = ac.ledger.proof(address.toAccountKey).valueOr: + raiseAssert "Failed to get account proof: " & $$error + + accProof[0] + +proc getStorageProof*(ac: LedgerRef, address: Address, slots: openArray[UInt256]): seq[seq[seq[byte]]] = + var storageProof = newSeqOfCap[seq[seq[byte]]](slots.len) + + let + addressHash = address.toAccountKey + accountExists = ac.ledger.hasPath(addressHash).valueOr: + raiseAssert "Call to hasPath failed: " & $$error + + for slot in slots: + if not accountExists: + storageProof.add(@[]) + continue + + let + slotKey = ac.slots.get(slot).valueOr: + slot.toBytesBE.keccak256 + slotProof = ac.ledger.slotProof(addressHash, slotKey).valueOr: + if error.aErr == FetchPathNotFound: + storageProof.add(@[]) + continue + else: + raiseAssert "Failed to get slot proof: " & $$error + storageProof.add(slotProof[0]) -export AccountsLedgerRef, base, base_config, base_iterators + storageProof # ------------------------------------------------------------------------------ -# Public constructor +# Public virtual read-only methods # ------------------------------------------------------------------------------ -proc init*(_: type LedgerRef, db: CoreDbRef, storeSlotHash: bool = false): LedgerRef = - LedgerRef(ac: AccountsLedgerRef.init(db, storeSlotHash)).bless(db) +proc getStateRoot*(db: ReadOnlyStateDB): Hash32 {.borrow.} +proc getCodeHash*(db: ReadOnlyStateDB, address: Address): Hash32 = getCodeHash(distinctBase db, address) +proc getStorageRoot*(db: ReadOnlyStateDB, address: Address): Hash32 = getStorageRoot(distinctBase db, address) +proc getBalance*(db: ReadOnlyStateDB, address: Address): UInt256 = getBalance(distinctBase db, address) +proc getStorage*(db: ReadOnlyStateDB, address: Address, slot: UInt256): UInt256 = getStorage(distinctBase db, address, slot) +proc getNonce*(db: ReadOnlyStateDB, address: Address): AccountNonce = getNonce(distinctBase db, address) +proc getCode*(db: ReadOnlyStateDB, address: Address): CodeBytesRef = getCode(distinctBase db, address) +proc getCodeSize*(db: ReadOnlyStateDB, address: Address): int = getCodeSize(distinctBase db, address) +proc contractCollision*(db: ReadOnlyStateDB, address: Address): bool = contractCollision(distinctBase db, address) +proc accountExists*(db: ReadOnlyStateDB, address: Address): bool = accountExists(distinctBase db, address) +proc isDeadAccount*(db: ReadOnlyStateDB, address: Address): bool = isDeadAccount(distinctBase db, address) +proc isEmptyAccount*(db: ReadOnlyStateDB, address: Address): bool = isEmptyAccount(distinctBase db, address) +proc getCommittedStorage*(db: ReadOnlyStateDB, address: Address, slot: UInt256): UInt256 = getCommittedStorage(distinctBase db, address, slot) +proc inAccessList*(db: ReadOnlyStateDB, address: Address): bool = inAccessList(distinctBase db, address) +proc inAccessList*(db: ReadOnlyStateDB, address: Address, slot: UInt256): bool = inAccessList(distinctBase db, address) +proc getTransientStorage*(db: ReadOnlyStateDB, + address: Address, slot: UInt256): UInt256 = getTransientStorage(distinctBase db, address, slot) +proc getAccountProof*(db: ReadOnlyStateDB, address: Address): seq[seq[byte]] = getAccountProof(distinctBase db, address) +proc getStorageProof*(db: ReadOnlyStateDB, address: Address, slots: openArray[UInt256]): seq[seq[seq[byte]]] = getStorageProof(distinctBase db, address, slots) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/db/ledger/backend/accounts_ledger.nim b/nimbus/db/ledger/backend/accounts_ledger.nim deleted file mode 100644 index 2b23b42529..0000000000 --- a/nimbus/db/ledger/backend/accounts_ledger.nim +++ /dev/null @@ -1,916 +0,0 @@ -# Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed except -# according to those terms. - -{.push raises: [].} - -import - std/[tables, hashes, sets, typetraits], - chronicles, - eth/common/eth_types, - results, - minilru, - ../../../utils/mergeutils, - ../../../evm/code_bytes, - ../../../stateless/multi_keys, - "../../.."/[constants, utils/utils], - ../../access_list as ac_access_list, - "../.."/[core_db, storage_types, transient_storage], - ../../aristo/aristo_blobify - -const - debugAccountsLedgerRef = false - codeLruSize = 16*1024 - # An LRU cache of 16K items gives roughly 90% hit rate anecdotally on a - # small range of test blocks - this number could be studied in more detail - # Per EIP-170, a the code of a contract can be up to `MAX_CODE_SIZE` = 24kb, - # which would cause a worst case of 386MB memory usage though in reality - # code sizes are much smaller - it would make sense to study these numbers - # in greater detail. - slotsLruSize = 16 * 1024 - -type - AccountFlag = enum - Alive - IsNew - Dirty - Touched - CodeChanged - StorageChanged - NewlyCreated # EIP-6780: self destruct only in same transaction - - AccountFlags = set[AccountFlag] - - AccountRef = ref object - statement: CoreDbAccount - accPath: Hash32 - flags: AccountFlags - code: CodeBytesRef - originalStorage: TableRef[UInt256, UInt256] - overlayStorage: Table[UInt256, UInt256] - - WitnessData* = object - storageKeys*: HashSet[UInt256] - codeTouched*: bool - - AccountsLedgerRef* = ref object - ledger: CoreDbAccRef # AccountLedger - kvt: CoreDbKvtRef - savePoint: LedgerSavePoint - witnessCache: Table[Address, WitnessData] - isDirty: bool - ripemdSpecial: bool - storeSlotHash*: bool - cache: Table[Address, AccountRef] - # Second-level cache for the ledger save point, which is cleared on every - # persist - code: LruCache[Hash32, CodeBytesRef] - ## The code cache provides two main benefits: - ## - ## * duplicate code is shared in memory beween accounts - ## * the jump destination table does not have to be recomputed for every - ## execution, for commonly called called contracts - ## - ## The former feature is specially important in the 2.3-2.7M block range - ## when underpriced code opcodes are being run en masse - both advantages - ## help performance broadly as well. - - slots: LruCache[UInt256, Hash32] - ## Because the same slots often reappear, we want to avoid writing them - ## over and over again to the database to avoid the WAL and compation - ## write amplification that ensues - - ReadOnlyStateDB* = distinct AccountsLedgerRef - - TransactionState = enum - Pending - Committed - RolledBack - - LedgerSavePoint* = ref object - parentSavepoint: LedgerSavePoint - cache: Table[Address, AccountRef] - dirty: Table[Address, AccountRef] - selfDestruct: HashSet[Address] - logEntries: seq[Log] - accessList: ac_access_list.AccessList - transientStorage: TransientStorage - state: TransactionState - when debugAccountsLedgerRef: - depth: int - -const - emptyEthAccount = Account.init() - - resetFlags = { - Dirty, - IsNew, - Touched, - CodeChanged, - StorageChanged, - NewlyCreated - } - -when debugAccountsLedgerRef: - import - stew/byteutils - - proc inspectSavePoint(name: string, x: LedgerSavePoint) = - debugEcho "*** ", name, ": ", x.depth, " ***" - var sp = x - while sp != nil: - for address, acc in sp.cache: - debugEcho address.toHex, " ", acc.flags - sp = sp.parentSavepoint - -template logTxt(info: static[string]): static[string] = - "AccountsLedgerRef " & info - -template toAccountKey(acc: AccountRef): Hash32 = - acc.accPath - -template toAccountKey(eAddr: Address): Hash32 = - eAddr.data.keccak256 - - -proc beginSavepoint*(ac: AccountsLedgerRef): LedgerSavePoint {.gcsafe.} - -proc resetCoreDbAccount(ac: AccountsLedgerRef, acc: AccountRef) = - const info = "resetCoreDbAccount(): " - ac.ledger.clearStorage(acc.toAccountKey).isOkOr: - raiseAssert info & $$error - acc.statement.nonce = emptyEthAccount.nonce - acc.statement.balance = emptyEthAccount.balance - acc.statement.codeHash = emptyEthAccount.codeHash - -# The AccountsLedgerRef is modeled after TrieDatabase for it's transaction style -proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef, storeSlotHash: bool): AccountsLedgerRef = - new result - result.ledger = db.ctx.getAccounts() - result.kvt = db.ctx.getKvt() - result.witnessCache = Table[Address, WitnessData]() - result.storeSlotHash = storeSlotHash - result.code = typeof(result.code).init(codeLruSize) - result.slots = typeof(result.slots).init(slotsLruSize) - discard result.beginSavepoint - -proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef): AccountsLedgerRef = - init(x, db, EMPTY_ROOT_HASH) - -proc getStateRoot*(ac: AccountsLedgerRef): Hash32 = - const info = "state(): " - # make sure all savepoint already committed - doAssert(ac.savePoint.parentSavepoint.isNil) - # make sure all cache already committed - doAssert(ac.isDirty == false) - ac.ledger.stateRoot(updateOk=true).valueOr: - raiseAssert info & $$error - -proc isTopLevelClean*(ac: AccountsLedgerRef): bool = - ## Getter, returns `true` if all pending data have been commited. - not ac.isDirty and ac.savePoint.parentSavepoint.isNil - -proc beginSavepoint*(ac: AccountsLedgerRef): LedgerSavePoint = - new result - result.cache = Table[Address, AccountRef]() - result.accessList.init() - result.transientStorage.init() - result.state = Pending - result.parentSavepoint = ac.savePoint - ac.savePoint = result - - when debugAccountsLedgerRef: - if not result.parentSavePoint.isNil: - result.depth = result.parentSavePoint.depth + 1 - inspectSavePoint("snapshot", result) - -proc rollback*(ac: AccountsLedgerRef, sp: LedgerSavePoint) = - # Transactions should be handled in a strictly nested fashion. - # Any child transaction must be committed or rolled-back before - # its parent transactions: - doAssert ac.savePoint == sp and sp.state == Pending - ac.savePoint = sp.parentSavepoint - sp.state = RolledBack - - when debugAccountsLedgerRef: - inspectSavePoint("rollback", ac.savePoint) - -proc commit*(ac: AccountsLedgerRef, sp: LedgerSavePoint) = - # Transactions should be handled in a strictly nested fashion. - # Any child transaction must be committed or rolled-back before - # its parent transactions: - doAssert ac.savePoint == sp and sp.state == Pending - # cannot commit most inner savepoint - doAssert not sp.parentSavepoint.isNil - - ac.savePoint = sp.parentSavepoint - ac.savePoint.cache.mergeAndReset(sp.cache) - ac.savePoint.dirty.mergeAndReset(sp.dirty) - ac.savePoint.transientStorage.mergeAndReset(sp.transientStorage) - ac.savePoint.accessList.mergeAndReset(sp.accessList) - ac.savePoint.selfDestruct.mergeAndReset(sp.selfDestruct) - ac.savePoint.logEntries.mergeAndReset(sp.logEntries) - sp.state = Committed - - when debugAccountsLedgerRef: - inspectSavePoint("commit", ac.savePoint) - -proc dispose*(ac: AccountsLedgerRef, sp: LedgerSavePoint) = - if sp.state == Pending: - ac.rollback(sp) - -proc safeDispose*(ac: AccountsLedgerRef, sp: LedgerSavePoint) = - if (not isNil(sp)) and (sp.state == Pending): - ac.rollback(sp) - -proc getAccount( - ac: AccountsLedgerRef; - address: Address; - shouldCreate = true; - ): AccountRef = - - # search account from layers of cache - var sp = ac.savePoint - while sp != nil: - result = sp.cache.getOrDefault(address) - if not result.isNil: - return - sp = sp.parentSavepoint - - if ac.cache.pop(address, result): - # Check second-level cache - ac.savePoint.cache[address] = result - return - - # not found in cache, look into state trie - let - accPath = address.toAccountKey - rc = ac.ledger.fetch accPath - if rc.isOk: - result = AccountRef( - statement: rc.value, - accPath: accPath, - flags: {Alive}) - elif shouldCreate: - result = AccountRef( - statement: CoreDbAccount( - nonce: emptyEthAccount.nonce, - balance: emptyEthAccount.balance, - codeHash: emptyEthAccount.codeHash), - accPath: accPath, - flags: {Alive, IsNew}) - else: - return # ignore, don't cache - - # cache the account - ac.savePoint.cache[address] = result - ac.savePoint.dirty[address] = result - -proc clone(acc: AccountRef, cloneStorage: bool): AccountRef = - result = AccountRef( - statement: acc.statement, - accPath: acc.accPath, - flags: acc.flags, - code: acc.code) - - if cloneStorage: - result.originalStorage = acc.originalStorage - # it's ok to clone a table this way - result.overlayStorage = acc.overlayStorage - -proc isEmpty(acc: AccountRef): bool = - acc.statement.nonce == 0 and - acc.statement.balance.isZero and - acc.statement.codeHash == EMPTY_CODE_HASH - -template exists(acc: AccountRef): bool = - Alive in acc.flags - -proc originalStorageValue( - acc: AccountRef; - slot: UInt256; - ac: AccountsLedgerRef; - ): UInt256 = - # share the same original storage between multiple - # versions of account - if acc.originalStorage.isNil: - acc.originalStorage = newTable[UInt256, UInt256]() - else: - acc.originalStorage[].withValue(slot, val) do: - return val[] - - # Not in the original values cache - go to the DB. - let - slotKey = ac.slots.get(slot).valueOr: - slot.toBytesBE.keccak256 - rc = ac.ledger.slotFetch(acc.toAccountKey, slotKey) - if rc.isOk: - result = rc.value - - acc.originalStorage[slot] = result - -proc storageValue( - acc: AccountRef; - slot: UInt256; - ac: AccountsLedgerRef; - ): UInt256 = - acc.overlayStorage.withValue(slot, val) do: - return val[] - do: - result = acc.originalStorageValue(slot, ac) - -proc kill(ac: AccountsLedgerRef, acc: AccountRef) = - acc.flags.excl Alive - acc.overlayStorage.clear() - acc.originalStorage = nil - ac.resetCoreDbAccount acc - acc.code.reset() - -type - PersistMode = enum - DoNothing - Update - Remove - -proc persistMode(acc: AccountRef): PersistMode = - result = DoNothing - if Alive in acc.flags: - if IsNew in acc.flags or Dirty in acc.flags: - result = Update - else: - if IsNew notin acc.flags: - result = Remove - -proc persistCode(acc: AccountRef, ac: AccountsLedgerRef) = - if acc.code.len != 0 and not acc.code.persisted: - let rc = ac.kvt.put( - contractHashKey(acc.statement.codeHash).toOpenArray, acc.code.bytes()) - if rc.isErr: - warn logTxt "persistCode()", - codeHash=acc.statement.codeHash, error=($$rc.error) - else: - # If the ledger changes rolled back entirely from the database, the ledger - # code cache must also be cleared! - acc.code.persisted = true - -proc persistStorage(acc: AccountRef, ac: AccountsLedgerRef) = - const info = "persistStorage(): " - - if acc.overlayStorage.len == 0: - # TODO: remove the storage too if we figure out - # how to create 'virtual' storage room for each account - return - - if acc.originalStorage.isNil: - acc.originalStorage = newTable[UInt256, UInt256]() - - # Make sure that there is an account entry on the database. This is needed by - # `Aristo` for updating the account's storage area reference. As a side effect, - # this action also updates the latest statement data. - ac.ledger.merge(acc.toAccountKey, acc.statement).isOkOr: - raiseAssert info & $$error - - # Save `overlayStorage[]` on database - for slot, value in acc.overlayStorage: - acc.originalStorage[].withValue(slot, v): - if v[] == value: - continue # Avoid writing A-B-A updates - - var cached = true - let slotKey = ac.slots.get(slot).valueOr: - cached = false - let hash = slot.toBytesBE.keccak256 - ac.slots.put(slot, hash) - hash - - if value > 0: - ac.ledger.slotMerge(acc.toAccountKey, slotKey, value).isOkOr: - raiseAssert info & $$error - - # move the overlayStorage to originalStorage, related to EIP2200, EIP1283 - acc.originalStorage[slot] = value - - else: - ac.ledger.slotDelete(acc.toAccountKey, slotKey).isOkOr: - if error.error != StoNotFound: - raiseAssert info & $$error - discard - acc.originalStorage.del(slot) - - if ac.storeSlotHash and not cached: - # Write only if it was not cached to avoid writing the same data over and - # over.. - let - key = slotKey.data.slotHashToSlotKey - rc = ac.kvt.put(key.toOpenArray, blobify(slot).data) - if rc.isErr: - warn logTxt "persistStorage()", slot, error=($$rc.error) - - acc.overlayStorage.clear() - -proc makeDirty(ac: AccountsLedgerRef, address: Address, cloneStorage = true): AccountRef = - ac.isDirty = true - result = ac.getAccount(address) - if address in ac.savePoint.cache: - # it's already in latest savepoint - result.flags.incl Dirty - ac.savePoint.dirty[address] = result - return - - # put a copy into latest savepoint - result = result.clone(cloneStorage) - result.flags.incl Dirty - ac.savePoint.cache[address] = result - ac.savePoint.dirty[address] = result - -proc getCodeHash*(ac: AccountsLedgerRef, address: Address): Hash32 = - let acc = ac.getAccount(address, false) - if acc.isNil: emptyEthAccount.codeHash - else: acc.statement.codeHash - -proc getBalance*(ac: AccountsLedgerRef, address: Address): UInt256 = - let acc = ac.getAccount(address, false) - if acc.isNil: emptyEthAccount.balance - else: acc.statement.balance - -proc getNonce*(ac: AccountsLedgerRef, address: Address): AccountNonce = - let acc = ac.getAccount(address, false) - if acc.isNil: emptyEthAccount.nonce - else: acc.statement.nonce - -proc getCode*(ac: AccountsLedgerRef, address: Address): CodeBytesRef = - # Always returns non-nil! - let acc = ac.getAccount(address, false) - if acc.isNil: - return CodeBytesRef() - - if acc.code == nil: - acc.code = - if acc.statement.codeHash != EMPTY_CODE_HASH: - ac.code.get(acc.statement.codeHash).valueOr: - var rc = ac.kvt.get(contractHashKey(acc.statement.codeHash).toOpenArray) - if rc.isErr: - warn logTxt "getCode()", codeHash=acc.statement.codeHash, error=($$rc.error) - CodeBytesRef() - else: - let newCode = CodeBytesRef.init(move(rc.value), persisted = true) - ac.code.put(acc.statement.codeHash, newCode) - newCode - else: - CodeBytesRef() - - acc.code - -proc getCodeSize*(ac: AccountsLedgerRef, address: Address): int = - let acc = ac.getAccount(address, false) - if acc.isNil: - return 0 - - if acc.code == nil: - if acc.statement.codeHash == EMPTY_CODE_HASH: - return 0 - acc.code = ac.code.get(acc.statement.codeHash).valueOr: - # On a cache miss, we don't fetch the code - instead, we fetch just the - # length - should the code itself be needed, it will typically remain - # cached and easily accessible in the database layer - this is to prevent - # EXTCODESIZE calls from messing up the code cache and thus causing - # recomputation of the jump destination table - var rc = ac.kvt.len(contractHashKey(acc.statement.codeHash).toOpenArray) - - return rc.valueOr: - warn logTxt "getCodeSize()", codeHash=acc.statement.codeHash, error=($$rc.error) - 0 - - acc.code.len() - -proc getCommittedStorage*(ac: AccountsLedgerRef, address: Address, slot: UInt256): UInt256 = - let acc = ac.getAccount(address, false) - if acc.isNil: - return - acc.originalStorageValue(slot, ac) - -proc getStorage*(ac: AccountsLedgerRef, address: Address, slot: UInt256): UInt256 = - let acc = ac.getAccount(address, false) - if acc.isNil: - return - acc.storageValue(slot, ac) - -proc contractCollision*(ac: AccountsLedgerRef, address: Address): bool = - let acc = ac.getAccount(address, false) - if acc.isNil: - return - acc.statement.nonce != 0 or - acc.statement.codeHash != EMPTY_CODE_HASH or - not ac.ledger.slotStateEmptyOrVoid(acc.toAccountKey) - -proc accountExists*(ac: AccountsLedgerRef, address: Address): bool = - let acc = ac.getAccount(address, false) - if acc.isNil: - return - acc.exists() - -proc isEmptyAccount*(ac: AccountsLedgerRef, address: Address): bool = - let acc = ac.getAccount(address, false) - doAssert not acc.isNil - doAssert acc.exists() - acc.isEmpty() - -proc isDeadAccount*(ac: AccountsLedgerRef, address: Address): bool = - let acc = ac.getAccount(address, false) - if acc.isNil: - return true - if not acc.exists(): - return true - acc.isEmpty() - -proc setBalance*(ac: AccountsLedgerRef, address: Address, balance: UInt256) = - let acc = ac.getAccount(address) - acc.flags.incl {Alive} - if acc.statement.balance != balance: - ac.makeDirty(address).statement.balance = balance - -proc addBalance*(ac: AccountsLedgerRef, address: Address, delta: UInt256) = - # EIP161: We must check emptiness for the objects such that the account - # clearing (0,0,0 objects) can take effect. - if delta.isZero: - let acc = ac.getAccount(address) - if acc.isEmpty: - ac.makeDirty(address).flags.incl Touched - return - ac.setBalance(address, ac.getBalance(address) + delta) - -proc subBalance*(ac: AccountsLedgerRef, address: Address, delta: UInt256) = - if delta.isZero: - # This zero delta early exit is important as shown in EIP-4788. - # If the account is created, it will change the state. - # But early exit will prevent the account creation. - # In this case, the SYSTEM_ADDRESS - return - ac.setBalance(address, ac.getBalance(address) - delta) - -proc setNonce*(ac: AccountsLedgerRef, address: Address, nonce: AccountNonce) = - let acc = ac.getAccount(address) - acc.flags.incl {Alive} - if acc.statement.nonce != nonce: - ac.makeDirty(address).statement.nonce = nonce - -proc incNonce*(ac: AccountsLedgerRef, address: Address) = - ac.setNonce(address, ac.getNonce(address) + 1) - -proc setCode*(ac: AccountsLedgerRef, address: Address, code: seq[byte]) = - let acc = ac.getAccount(address) - acc.flags.incl {Alive} - let codeHash = keccak256(code) - if acc.statement.codeHash != codeHash: - var acc = ac.makeDirty(address) - acc.statement.codeHash = codeHash - # Try to reuse cache entry if it exists, but don't save the code - it's not - # a given that it will be executed within LRU range - acc.code = ac.code.get(codeHash).valueOr(CodeBytesRef.init(code)) - acc.flags.incl CodeChanged - -proc setStorage*(ac: AccountsLedgerRef, address: Address, slot, value: UInt256) = - let acc = ac.getAccount(address) - acc.flags.incl {Alive} - let oldValue = acc.storageValue(slot, ac) - if oldValue != value: - var acc = ac.makeDirty(address) - acc.overlayStorage[slot] = value - acc.flags.incl StorageChanged - -proc clearStorage*(ac: AccountsLedgerRef, address: Address) = - const info = "clearStorage(): " - - # a.k.a createStateObject. If there is an existing account with - # the given address, it is overwritten. - - let acc = ac.getAccount(address) - acc.flags.incl {Alive, NewlyCreated} - - let empty = ac.ledger.slotStateEmpty(acc.toAccountKey).valueOr: return - if not empty: - # need to clear the storage from the database first - let acc = ac.makeDirty(address, cloneStorage = false) - ac.ledger.clearStorage(acc.toAccountKey).isOkOr: - raiseAssert info & $$error - # update caches - if acc.originalStorage.isNil.not: - # also clear originalStorage cache, otherwise - # both getStorage and getCommittedStorage will - # return wrong value - acc.originalStorage.clear() - -proc deleteAccount*(ac: AccountsLedgerRef, address: Address) = - # make sure all savepoints already committed - doAssert(ac.savePoint.parentSavepoint.isNil) - let acc = ac.getAccount(address) - ac.savePoint.dirty[address] = acc - ac.kill acc - -proc selfDestruct*(ac: AccountsLedgerRef, address: Address) = - ac.setBalance(address, 0.u256) - ac.savePoint.selfDestruct.incl address - -proc selfDestruct6780*(ac: AccountsLedgerRef, address: Address) = - let acc = ac.getAccount(address, false) - if acc.isNil: - return - - if NewlyCreated in acc.flags: - ac.selfDestruct(address) - -proc selfDestructLen*(ac: AccountsLedgerRef): int = - ac.savePoint.selfDestruct.len - -proc addLogEntry*(ac: AccountsLedgerRef, log: Log) = - ac.savePoint.logEntries.add log - -proc getAndClearLogEntries*(ac: AccountsLedgerRef): seq[Log] = - swap(result, ac.savePoint.logEntries) - -proc ripemdSpecial*(ac: AccountsLedgerRef) = - ac.ripemdSpecial = true - -proc deleteEmptyAccount(ac: AccountsLedgerRef, address: Address) = - let acc = ac.getAccount(address, false) - if acc.isNil: - return - if not acc.isEmpty: - return - if not acc.exists: - return - - ac.savePoint.dirty[address] = acc - ac.kill acc - -proc clearEmptyAccounts(ac: AccountsLedgerRef) = - # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-161.md - for acc in ac.savePoint.dirty.values(): - if Touched in acc.flags and - acc.isEmpty and acc.exists: - ac.kill acc - - # https://github.com/ethereum/EIPs/issues/716 - if ac.ripemdSpecial: - ac.deleteEmptyAccount(RIPEMD_ADDR) - ac.ripemdSpecial = false - -proc persist*(ac: AccountsLedgerRef, - clearEmptyAccount: bool = false, - clearCache = false) = - const info = "persist(): " - - # make sure all savepoint already committed - doAssert(ac.savePoint.parentSavepoint.isNil) - - if clearEmptyAccount: - ac.clearEmptyAccounts() - - for address in ac.savePoint.selfDestruct: - ac.deleteAccount(address) - - for (eAddr,acc) in ac.savePoint.dirty.pairs(): # This is a hotspot in block processing - case acc.persistMode() - of Update: - if CodeChanged in acc.flags: - acc.persistCode(ac) - if StorageChanged in acc.flags: - acc.persistStorage(ac) - else: - # This one is only necessary unless `persistStorage()` is run which needs - # to `merge()` the latest statement as well. - ac.ledger.merge(acc.toAccountKey, acc.statement).isOkOr: - raiseAssert info & $$error - of Remove: - ac.ledger.delete(acc.toAccountKey).isOkOr: - if error.error != AccNotFound: - raiseAssert info & $$error - ac.savePoint.cache.del eAddr - of DoNothing: - # dead man tell no tales - # remove touched dead account from cache - if Alive notin acc.flags: - ac.savePoint.cache.del eAddr - - acc.flags = acc.flags - resetFlags - ac.savePoint.dirty.clear() - - if clearCache: - # This overwrites the cache from the previous persist, providing a crude LRU - # scheme with little overhead - # TODO https://github.com/nim-lang/Nim/issues/23759 - swap(ac.cache, ac.savePoint.cache) - ac.savePoint.cache.reset() - - ac.savePoint.selfDestruct.clear() - - # EIP2929 - ac.savePoint.accessList.clear() - - ac.isDirty = false - -iterator addresses*(ac: AccountsLedgerRef): Address = - # make sure all savepoint already committed - doAssert(ac.savePoint.parentSavepoint.isNil) - for address, _ in ac.savePoint.cache: - yield address - -iterator accounts*(ac: AccountsLedgerRef): Account = - # make sure all savepoint already committed - doAssert(ac.savePoint.parentSavepoint.isNil) - for _, acc in ac.savePoint.cache: - yield ac.ledger.recast( - acc.toAccountKey, acc.statement, updateOk=true).value - -iterator pairs*(ac: AccountsLedgerRef): (Address, Account) = - # make sure all savepoint already committed - doAssert(ac.savePoint.parentSavepoint.isNil) - for address, acc in ac.savePoint.cache: - yield (address, ac.ledger.recast( - acc.toAccountKey, acc.statement, updateOk=true).value) - -iterator storage*( - ac: AccountsLedgerRef; - eAddr: Address; - ): (UInt256, UInt256) = - # beware that if the account not persisted, - # the storage root will not be updated - for (slotHash, value) in ac.ledger.slotPairs eAddr.toAccountKey: - let rc = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray) - if rc.isErr: - warn logTxt "storage()", slotHash, error=($$rc.error) - continue - let r = deblobify(rc.value, UInt256) - if r.isErr: - warn logTxt "storage.deblobify", slotHash, msg=r.error - continue - yield (r.value, value) - -iterator cachedStorage*(ac: AccountsLedgerRef, address: Address): (UInt256, UInt256) = - let acc = ac.getAccount(address, false) - if not acc.isNil: - if not acc.originalStorage.isNil: - for k, v in acc.originalStorage: - yield (k, v) - -proc getStorageRoot*(ac: AccountsLedgerRef, address: Address): Hash32 = - # beware that if the account not persisted, - # the storage root will not be updated - let acc = ac.getAccount(address, false) - if acc.isNil: EMPTY_ROOT_HASH - else: ac.ledger.slotState(acc.toAccountKey).valueOr: EMPTY_ROOT_HASH - -proc update(wd: var WitnessData, acc: AccountRef) = - # once the code is touched make sure it doesn't get reset back to false in another update - if not wd.codeTouched: - wd.codeTouched = CodeChanged in acc.flags or acc.code != nil - - if not acc.originalStorage.isNil: - for k, v in acc.originalStorage: - if v.isZero: continue - wd.storageKeys.incl k - - for k, v in acc.overlayStorage: - wd.storageKeys.incl k - -proc witnessData(acc: AccountRef): WitnessData = - result.storageKeys = HashSet[UInt256]() - update(result, acc) - -proc collectWitnessData*(ac: AccountsLedgerRef) = - # make sure all savepoint already committed - doAssert(ac.savePoint.parentSavepoint.isNil) - # usually witness data is collected before we call persist() - for address, acc in ac.savePoint.cache: - ac.witnessCache.withValue(address, val) do: - update(val[], acc) - do: - ac.witnessCache[address] = witnessData(acc) - -func multiKeys(slots: HashSet[UInt256]): MultiKeysRef = - if slots.len == 0: return - new result - for x in slots: - result.add x.toBytesBE - result.sort() - -proc makeMultiKeys*(ac: AccountsLedgerRef): MultiKeysRef = - # this proc is called after we done executing a block - new result - for k, v in ac.witnessCache: - result.add(k, v.codeTouched, multiKeys(v.storageKeys)) - result.sort() - -proc accessList*(ac: AccountsLedgerRef, address: Address) = - ac.savePoint.accessList.add(address) - -proc accessList*(ac: AccountsLedgerRef, address: Address, slot: UInt256) = - ac.savePoint.accessList.add(address, slot) - -func inAccessList*(ac: AccountsLedgerRef, address: Address): bool = - var sp = ac.savePoint - while sp != nil: - result = sp.accessList.contains(address) - if result: - return - sp = sp.parentSavepoint - -func inAccessList*(ac: AccountsLedgerRef, address: Address, slot: UInt256): bool = - var sp = ac.savePoint - while sp != nil: - result = sp.accessList.contains(address, slot) - if result: - return - sp = sp.parentSavepoint - -func getTransientStorage*(ac: AccountsLedgerRef, - address: Address, slot: UInt256): UInt256 = - var sp = ac.savePoint - while sp != nil: - let (ok, res) = sp.transientStorage.getStorage(address, slot) - if ok: - return res - sp = sp.parentSavepoint - -proc setTransientStorage*(ac: AccountsLedgerRef, - address: Address, slot, val: UInt256) = - ac.savePoint.transientStorage.setStorage(address, slot, val) - -proc clearTransientStorage*(ac: AccountsLedgerRef) = - # make sure all savepoint already committed - doAssert(ac.savePoint.parentSavepoint.isNil) - ac.savePoint.transientStorage.clear() - -func getAccessList*(ac: AccountsLedgerRef): common.AccessList = - # make sure all savepoint already committed - doAssert(ac.savePoint.parentSavepoint.isNil) - ac.savePoint.accessList.getAccessList() - -proc getEthAccount*(ac: AccountsLedgerRef, address: Address): Account = - let acc = ac.getAccount(address, false) - if acc.isNil: - return emptyEthAccount - - ## Convert to legacy object, will throw an assert if that fails - let rc = ac.ledger.recast(acc.toAccountKey, acc.statement) - if rc.isErr: - raiseAssert "getAccount(): cannot convert account: " & $$rc.error - rc.value - -proc getAccountProof*(ac: AccountsLedgerRef, address: Address): seq[seq[byte]] = - let accProof = ac.ledger.proof(address.toAccountKey).valueOr: - raiseAssert "Failed to get account proof: " & $$error - - accProof[0] - -proc getStorageProof*(ac: AccountsLedgerRef, address: Address, slots: openArray[UInt256]): seq[seq[seq[byte]]] = - var storageProof = newSeqOfCap[seq[seq[byte]]](slots.len) - - let - addressHash = address.toAccountKey - accountExists = ac.ledger.hasPath(addressHash).valueOr: - raiseAssert "Call to hasPath failed: " & $$error - - for slot in slots: - if not accountExists: - storageProof.add(@[]) - continue - - let - slotKey = ac.slots.get(slot).valueOr: - slot.toBytesBE.keccak256 - slotProof = ac.ledger.slotProof(addressHash, slotKey).valueOr: - if error.aErr == FetchPathNotFound: - storageProof.add(@[]) - continue - else: - raiseAssert "Failed to get slot proof: " & $$error - storageProof.add(slotProof[0]) - - storageProof - -proc getStateRoot*(db: ReadOnlyStateDB): Hash32 {.borrow.} -proc getCodeHash*(db: ReadOnlyStateDB, address: Address): Hash32 = getCodeHash(distinctBase db, address) -proc getStorageRoot*(db: ReadOnlyStateDB, address: Address): Hash32 = getStorageRoot(distinctBase db, address) -proc getBalance*(db: ReadOnlyStateDB, address: Address): UInt256 = getBalance(distinctBase db, address) -proc getStorage*(db: ReadOnlyStateDB, address: Address, slot: UInt256): UInt256 = getStorage(distinctBase db, address, slot) -proc getNonce*(db: ReadOnlyStateDB, address: Address): AccountNonce = getNonce(distinctBase db, address) -proc getCode*(db: ReadOnlyStateDB, address: Address): CodeBytesRef = getCode(distinctBase db, address) -proc getCodeSize*(db: ReadOnlyStateDB, address: Address): int = getCodeSize(distinctBase db, address) -proc contractCollision*(db: ReadOnlyStateDB, address: Address): bool = contractCollision(distinctBase db, address) -proc accountExists*(db: ReadOnlyStateDB, address: Address): bool = accountExists(distinctBase db, address) -proc isDeadAccount*(db: ReadOnlyStateDB, address: Address): bool = isDeadAccount(distinctBase db, address) -proc isEmptyAccount*(db: ReadOnlyStateDB, address: Address): bool = isEmptyAccount(distinctBase db, address) -proc getCommittedStorage*(db: ReadOnlyStateDB, address: Address, slot: UInt256): UInt256 = getCommittedStorage(distinctBase db, address, slot) -proc inAccessList*(db: ReadOnlyStateDB, address: Address): bool = inAccessList(distinctBase db, address) -proc inAccessList*(db: ReadOnlyStateDB, address: Address, slot: UInt256): bool = inAccessList(distinctBase db, address) -proc getTransientStorage*(db: ReadOnlyStateDB, - address: Address, slot: UInt256): UInt256 = getTransientStorage(distinctBase db, address, slot) -proc getAccountProof*(db: ReadOnlyStateDB, address: Address): seq[seq[byte]] = getAccountProof(distinctBase db, address) -proc getStorageProof*(db: ReadOnlyStateDB, address: Address, slots: openArray[UInt256]): seq[seq[seq[byte]]] = getStorageProof(distinctBase db, address, slots) diff --git a/nimbus/db/ledger/base.nim b/nimbus/db/ledger/base.nim deleted file mode 100644 index d8fc3ff125..0000000000 --- a/nimbus/db/ledger/base.nim +++ /dev/null @@ -1,329 +0,0 @@ -# Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -## Ledger management APIs. - -{.push raises: [].} - -import - std/typetraits, - eth/common/[addresses, hashes], - ../../evm/code_bytes, - ../../stateless/multi_keys, - ../core_db, - ./backend/accounts_ledger, - ./base/[api_tracking, base_config, base_desc] - -type - ReadOnlyStateDB* = distinct LedgerRef - -export - code_bytes, - LedgerRef, - LedgerSpRef - -# ------------------------------------------------------------------------------ -# Logging/tracking helpers (some public) -# ------------------------------------------------------------------------------ - -when LedgerEnableApiTracking: - import - std/times, - chronicles - logScope: - topics = "ledger" - const - apiTxt = "API" - -when LedgerEnableApiProfiling: - export - LedgerFnInx, - LedgerProfListRef - -# ------------------------------------------------------------------------------ -# Public methods -# ------------------------------------------------------------------------------ - -proc accessList*(ldg: LedgerRef, eAddr: Address) = - ldg.beginTrackApi LdgAccessListFn - ldg.ac.accessList(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr) - -proc accessList*(ldg: LedgerRef, eAddr: Address, slot: UInt256) = - ldg.beginTrackApi LdgAccessListFn - ldg.ac.accessList(eAddr, slot) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot - -proc accountExists*(ldg: LedgerRef, eAddr: Address): bool = - ldg.beginTrackApi LdgAccountExistsFn - result = ldg.ac.accountExists(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result - -proc addBalance*(ldg: LedgerRef, eAddr: Address, delta: UInt256) = - ldg.beginTrackApi LdgAddBalanceFn - ldg.ac.addBalance(eAddr, delta) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), delta - -proc addLogEntry*(ldg: LedgerRef, log: Log) = - ldg.beginTrackApi LdgAddLogEntryFn - ldg.ac.addLogEntry(log) - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc beginSavepoint*(ldg: LedgerRef): LedgerSpRef = - ldg.beginTrackApi LdgBeginSavepointFn - result = ldg.ac.beginSavepoint() - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc clearStorage*(ldg: LedgerRef, eAddr: Address) = - ldg.beginTrackApi LdgClearStorageFn - ldg.ac.clearStorage(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr) - -proc clearTransientStorage*(ldg: LedgerRef) = - ldg.beginTrackApi LdgClearTransientStorageFn - ldg.ac.clearTransientStorage() - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc collectWitnessData*(ldg: LedgerRef) = - ldg.beginTrackApi LdgCollectWitnessDataFn - ldg.ac.collectWitnessData() - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc commit*(ldg: LedgerRef, sp: LedgerSpRef) = - ldg.beginTrackApi LdgCommitFn - ldg.ac.commit(sp) - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc deleteAccount*(ldg: LedgerRef, eAddr: Address) = - ldg.beginTrackApi LdgDeleteAccountFn - ldg.ac.deleteAccount(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr) - -proc dispose*(ldg: LedgerRef, sp: LedgerSpRef) = - ldg.beginTrackApi LdgDisposeFn - ldg.ac.dispose(sp) - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc getAndClearLogEntries*(ldg: LedgerRef): seq[Log] = - ldg.beginTrackApi LdgGetAndClearLogEntriesFn - result = ldg.ac.getAndClearLogEntries() - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc getBalance*(ldg: LedgerRef, eAddr: Address): UInt256 = - ldg.beginTrackApi LdgGetBalanceFn - result = ldg.ac.getBalance(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result - -proc getCode*(ldg: LedgerRef, eAddr: Address): CodeBytesRef = - ldg.beginTrackApi LdgGetCodeFn - result = ldg.ac.getCode(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result - -proc getCodeHash*(ldg: LedgerRef, eAddr: Address): Hash32 = - ldg.beginTrackApi LdgGetCodeHashFn - result = ldg.ac.getCodeHash(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result=($$result) - -proc getCodeSize*(ldg: LedgerRef, eAddr: Address): int = - ldg.beginTrackApi LdgGetCodeSizeFn - result = ldg.ac.getCodeSize(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result - -proc getCommittedStorage*( - ldg: LedgerRef; - eAddr: Address; - slot: UInt256; - ): UInt256 = - ldg.beginTrackApi LdgGetCommittedStorageFn - result = ldg.ac.getCommittedStorage(eAddr, slot) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, result - -proc getNonce*(ldg: LedgerRef, eAddr: Address): AccountNonce = - ldg.beginTrackApi LdgGetNonceFn - result = ldg.ac.getNonce(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result - -proc getStorage*(ldg: LedgerRef, eAddr: Address, slot: UInt256): UInt256 = - ldg.beginTrackApi LdgGetStorageFn - result = ldg.ac.getStorage(eAddr, slot) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, result - -proc getStorageRoot*(ldg: LedgerRef, eAddr: Address): Hash32 = - ldg.beginTrackApi LdgGetStorageRootFn - result = ldg.ac.getStorageRoot(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result=($$result) - -proc getTransientStorage*( - ldg: LedgerRef; - eAddr: Address; - slot: UInt256; - ): UInt256 = - ldg.beginTrackApi LdgGetTransientStorageFn - result = ldg.ac.getTransientStorage(eAddr, slot) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, result - -proc contractCollision*(ldg: LedgerRef, eAddr: Address): bool = - ldg.beginTrackApi LdgContractCollisionFn - result = ldg.ac.contractCollision(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result - -proc inAccessList*(ldg: LedgerRef, eAddr: Address): bool = - ldg.beginTrackApi LdgInAccessListFn - result = ldg.ac.inAccessList(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result - -proc inAccessList*(ldg: LedgerRef, eAddr: Address, slot: UInt256): bool = - ldg.beginTrackApi LdgInAccessListFn - result = ldg.ac.inAccessList(eAddr, slot) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, result - -proc incNonce*(ldg: LedgerRef, eAddr: Address) = - ldg.beginTrackApi LdgIncNonceFn - ldg.ac.incNonce(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr) - -proc isDeadAccount*(ldg: LedgerRef, eAddr: Address): bool = - ldg.beginTrackApi LdgIsDeadAccountFn - result = ldg.ac.isDeadAccount(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result - -proc isEmptyAccount*(ldg: LedgerRef, eAddr: Address): bool = - ldg.beginTrackApi LdgIsEmptyAccountFn - result = ldg.ac.isEmptyAccount(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result - -proc isTopLevelClean*(ldg: LedgerRef): bool = - ldg.beginTrackApi LdgIsTopLevelCleanFn - result = ldg.ac.isTopLevelClean() - ldg.ifTrackApi: debug apiTxt, api, elapsed, result - -proc makeMultiKeys*(ldg: LedgerRef): MultiKeysRef = - ldg.beginTrackApi LdgMakeMultiKeysFn - result = ldg.ac.makeMultiKeys() - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc persist*(ldg: LedgerRef, clearEmptyAccount = false, clearCache = false) = - ldg.beginTrackApi LdgPersistFn - ldg.ac.persist(clearEmptyAccount, clearCache) - ldg.ifTrackApi: debug apiTxt, api, elapsed, clearEmptyAccount, clearCache - -proc ripemdSpecial*(ldg: LedgerRef) = - ldg.beginTrackApi LdgRipemdSpecialFn - ldg.ac.ripemdSpecial() - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc rollback*(ldg: LedgerRef, sp: LedgerSpRef) = - ldg.beginTrackApi LdgRollbackFn - ldg.ac.rollback(sp) - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc safeDispose*(ldg: LedgerRef, sp: LedgerSpRef) = - ldg.beginTrackApi LdgSafeDisposeFn - ldg.ac.safeDispose(sp) - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc selfDestruct*(ldg: LedgerRef, eAddr: Address) = - ldg.beginTrackApi LdgSelfDestructFn - ldg.ac.selfDestruct(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc selfDestruct6780*(ldg: LedgerRef, eAddr: Address) = - ldg.beginTrackApi LdgSelfDestruct6780Fn - ldg.ac.selfDestruct6780(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc selfDestructLen*(ldg: LedgerRef): int = - ldg.beginTrackApi LdgSelfDestructLenFn - result = ldg.ac.selfDestructLen() - ldg.ifTrackApi: debug apiTxt, api, elapsed, result - -proc setBalance*(ldg: LedgerRef, eAddr: Address, balance: UInt256) = - ldg.beginTrackApi LdgSetBalanceFn - ldg.ac.setBalance(eAddr, balance) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), balance - -proc setCode*(ldg: LedgerRef, eAddr: Address, code: seq[byte]) = - ldg.beginTrackApi LdgSetCodeFn - ldg.ac.setCode(eAddr, code) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), code - -proc setNonce*(ldg: LedgerRef, eAddr: Address, nonce: AccountNonce) = - ldg.beginTrackApi LdgSetNonceFn - ldg.ac.setNonce(eAddr, nonce) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), nonce - -proc setStorage*(ldg: LedgerRef, eAddr: Address, slot, val: UInt256) = - ldg.beginTrackApi LdgSetStorageFn - ldg.ac.setStorage(eAddr, slot, val) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, val - -proc setTransientStorage*( - ldg: LedgerRef; - eAddr: Address; - slot: UInt256; - val: UInt256; - ) = - ldg.beginTrackApi LdgSetTransientStorageFn - ldg.ac.setTransientStorage(eAddr, slot, val) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, val - -proc getStateRoot*(ldg: LedgerRef): Hash32 = - ldg.beginTrackApi LdgStateFn - result = ldg.ac.getStateRoot() - ldg.ifTrackApi: debug apiTxt, api, elapsed, result - -proc subBalance*(ldg: LedgerRef, eAddr: Address, delta: UInt256) = - ldg.beginTrackApi LdgSubBalanceFn - ldg.ac.subBalance(eAddr, delta) - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), delta - -proc getAccessList*(ldg: LedgerRef): AccessList = - ldg.beginTrackApi LdgGetAccessListFn - result = ldg.ac.getAccessList() - ldg.ifTrackApi: debug apiTxt, api, elapsed - -proc getEthAccount*(ldg: LedgerRef, eAddr: Address): Account = - ldg.beginTrackApi LdgGetAthAccountFn - result = ldg.ac.getEthAccount(eAddr) - ldg.ifTrackApi: debug apiTxt, api, elapsed, result - -proc getAccountProof*(ldg: LedgerRef, eAddr: Address): seq[seq[byte]] = - result = ldg.ac.getAccountProof(eAddr) - -proc getStorageProof*(ldg: LedgerRef, eAddr: Address, slots: openArray[UInt256]): seq[seq[seq[byte]]] = - result = ldg.ac.getStorageProof(eAddr, slots) - -# ------------------------------------------------------------------------------ -# Public virtual read-only methods -# ------------------------------------------------------------------------------ - -proc getStateRoot*(db: ReadOnlyStateDB): Hash32 {.borrow.} -proc getCodeHash*(db: ReadOnlyStateDB, address: Address): Hash32 = getCodeHash(distinctBase db, address) -proc getStorageRoot*(db: ReadOnlyStateDB, address: Address): Hash32 = getStorageRoot(distinctBase db, address) -proc getBalance*(db: ReadOnlyStateDB, address: Address): UInt256 = getBalance(distinctBase db, address) -proc getStorage*(db: ReadOnlyStateDB, address: Address, slot: UInt256): UInt256 = getStorage(distinctBase db, address, slot) -proc getNonce*(db: ReadOnlyStateDB, address: Address): AccountNonce = getNonce(distinctBase db, address) -proc getCode*(db: ReadOnlyStateDB, address: Address): CodeBytesRef = getCode(distinctBase db, address) -proc getCodeSize*(db: ReadOnlyStateDB, address: Address): int = getCodeSize(distinctBase db, address) -proc contractCollision*(db: ReadOnlyStateDB, address: Address): bool = contractCollision(distinctBase db, address) -proc accountExists*(db: ReadOnlyStateDB, address: Address): bool = accountExists(distinctBase db, address) -proc isDeadAccount*(db: ReadOnlyStateDB, address: Address): bool = isDeadAccount(distinctBase db, address) -proc isEmptyAccount*(db: ReadOnlyStateDB, address: Address): bool = isEmptyAccount(distinctBase db, address) -proc getCommittedStorage*(db: ReadOnlyStateDB, address: Address, slot: UInt256): UInt256 = getCommittedStorage(distinctBase db, address, slot) -proc inAccessList*(db: ReadOnlyStateDB, address: Address): bool = inAccessList(distinctBase db, address) -proc inAccessList*(db: ReadOnlyStateDB, address: Address, slot: UInt256): bool = inAccessList(distinctBase db, address) -proc getTransientStorage*(db: ReadOnlyStateDB, - address: Address, slot: UInt256): UInt256 = getTransientStorage(distinctBase db, address, slot) -proc getAccountProof*(db: ReadOnlyStateDB, address: Address): seq[seq[byte]] = getAccountProof(distinctBase db, address) -proc getStorageProof*(db: ReadOnlyStateDB, address: Address, slots: openArray[UInt256]): seq[seq[seq[byte]]] = getStorageProof(distinctBase db, address, slots) - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/nimbus/db/ledger/base/api_tracking.nim b/nimbus/db/ledger/base/api_tracking.nim deleted file mode 100644 index 17b150724b..0000000000 --- a/nimbus/db/ledger/base/api_tracking.nim +++ /dev/null @@ -1,148 +0,0 @@ -# Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -{.push raises: [].} - -import - std/[strutils, times], - eth/common, - stew/byteutils, - ../../../evm/code_bytes, - ../../aristo/aristo_profile, - ../../core_db, - "."/[base_config, base_desc] - -type - Elapsed* = distinct Duration - ## Needed for local `$` as it would be ambiguous for `Duration` - - LedgerFnInx* = enum - ## Profiling table index - SummaryItem = "total" - - LdgAccessListFn = "accessList" - LdgAccountExistsFn = "accountExists" - LdgAddBalanceFn = "addBalance" - LdgAddLogEntryFn = "addLogEntry" - LdgBeginSavepointFn = "beginSavepoint" - LdgClearStorageFn = "clearStorage" - LdgClearTransientStorageFn = "clearTransientStorage" - LdgCollectWitnessDataFn = "collectWitnessData" - LdgCommitFn = "commit" - LdgContractCollisionFn = "contractCollision" - LdgDeleteAccountFn = "deleteAccount" - LdgDisposeFn = "dispose" - LdgGetAccessListFn = "getAcessList" - LdgGetAccountFn = "getAccount" - LdgGetAndClearLogEntriesFn = "getAndClearLogEntries" - LdgGetBalanceFn = "getBalance" - LdgGetCodeFn = "getCode" - LdgGetCodeHashFn = "getCodeHash" - LdgGetCodeSizeFn = "getCodeSize" - LdgGetCommittedStorageFn = "getCommittedStorage" - LdgGetNonceFn = "getNonce" - LdgGetStorageFn = "getStorage" - LdgGetStorageRootFn = "getStorageRoot" - LdgGetTransientStorageFn = "getTransientStorage" - LdgGetAthAccountFn = "getEthAccount" - LdgInAccessListFn = "inAccessList" - LdgIncNonceFn = "incNonce" - LdgIsDeadAccountFn = "isDeadAccount" - LdgIsEmptyAccountFn = "isEmptyAccount" - LdgIsTopLevelCleanFn = "isTopLevelClean" - LdgMakeMultiKeysFn = "makeMultiKeys" - LdgPersistFn = "persist" - LdgRipemdSpecialFn = "ripemdSpecial" - LdgRollbackFn = "rollback" - LdgSafeDisposeFn = "safeDispose" - LdgSelfDestruct6780Fn = "selfDestruct6780" - LdgSelfDestructFn = "selfDestruct" - LdgSelfDestructLenFn = "selfDestructLen" - LdgSetBalanceFn = "setBalance" - LdgSetCodeFn = "setCode" - LdgSetNonceFn = "setNonce" - LdgSetStorageFn = "setStorage" - LdgSetTransientStorageFn = "setTransientStorage" - LdgStateFn = "state" - LdgSubBalanceFn = "subBalance" - - LdgAccountsIt = "accounts" - LdgAdressesIt = "addresses" - LdgCachedStorageIt = "cachedStorage" - LdgPairsIt = "pairs" - LdgStorageIt = "storage" - -# ------------------------------------------------------------------------------ -# Private helpers -# ------------------------------------------------------------------------------ - -func oaToStr(w: openArray[byte]): string = - w.toHex.toLowerAscii - -func toStr(w: Address): string = - w.toHex - -func toStr(w: Hash32): string = - w.toHex - -func toStr(w: CodeBytesRef): string = - if w.isNil: "nil" - else: "[" & $w.bytes.len & "]" - -func toStr(w: seq[byte]): string = - if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">" - else: "seq[byte][" & $w.len & "]" - -func toStr(w: seq[Log]): string = - "Logs[" & $w.len & "]" - -func toStr(ela: Duration): string = - aristo_profile.toStr(ela) - -# ------------------------------------------------------------------------------ -# Public API logging helpers -# ------------------------------------------------------------------------------ - -func `$`*(w: CodeBytesRef): string {.used.} = w.toStr -func `$`*(e: Elapsed): string = e.Duration.toStr -func `$`*(l: seq[Log]): string = l.toStr -func `$`*(b: seq[byte]): string = b.toStr -func `$$`*(a: Address): string = a.toStr # otherwise collision w/existing `$` -func `$$`*(h: Hash32): string = h.toStr # otherwise collision w/existing `$` - -# ------------------------------------------------------------------------------ -# Public API logging framework -# ------------------------------------------------------------------------------ - -template beginTrackApi*(ldg: LedgerRef; s: LedgerFnInx) = - when LedgerEnableApiTracking: - const api {.inject,used.} = s # Generally available - let baStart {.inject.} = getTime() # Local use only - -template ifTrackApi*(ldg: LedgerRef; code: untyped) = - when LedgerEnableApiTracking: - when LedgerEnableApiProfiling: - let elapsed {.inject,used.} = (getTime() - baStart).Elapsed - aristo_profile.update(ldg.profTab, api.ord, elapsed.Duration) - if ldg.trackApi: - when not LedgerEnableApiProfiling: # otherwise use variable above - let elapsed {.inject,used.} = (getTime() - baStart).Elapsed - code - -# ------------------------------------------------------------------------------ -# Public helpers -# ------------------------------------------------------------------------------ - -func init*(T: type LedgerProfListRef): T = - T(list: newSeq[LedgerProfData](1 + high(LedgerFnInx).ord)) - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/nimbus/db/ledger/base/base_config.nim b/nimbus/db/ledger/base/base_config.nim deleted file mode 100644 index 07ba08d0d2..0000000000 --- a/nimbus/db/ledger/base/base_config.nim +++ /dev/null @@ -1,48 +0,0 @@ -# Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -{.push raises: [].} - -import - ../../core_db/base/base_config - -# Configuration section -const - EnableApiTracking = false - ## When enabled, API functions are logged. Tracking is enabled by setting - ## the `trackApi` flag to `true`. This setting is typically inherited from - ## the `CoreDb` descriptor flag `trackLedgerApi` (which is only available - ## if the flag `CoreDbEnableApiTracking` is set `true`. - - EnableApiProfiling = false - ## Enable API functions profiling. This setting is only effective if the - ## flag `CoreDbEnableApiJumpTable` is set `true`. - -# Exportable constants (leave alone this section) -const - LedgerEnableApiTracking* = EnableApiTracking and CoreDbEnableApiTracking - LedgerEnableApiProfiling* = EnableApiProfiling and CoreDbEnableApiJumpTable - - -# Support warning about extra compile time options. For production, non of -# the above features should be enabled. -import strutils -const ledgerBaseConfigExtras* = block: - var s: seq[string] - when LedgerEnableApiTracking: - s.add "logging" - when LedgerEnableApiProfiling: - s.add "profiling" - if s.len == 0: - "" - else: - "Ledger(" & s.join(", ") & ")" - -# End diff --git a/nimbus/db/ledger/base/base_desc.nim b/nimbus/db/ledger/base/base_desc.nim deleted file mode 100644 index 450f912e34..0000000000 --- a/nimbus/db/ledger/base/base_desc.nim +++ /dev/null @@ -1,33 +0,0 @@ -# Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -{.push raises: [].} - -import - ../../aristo/aristo_profile, - ../backend/accounts_ledger - -type - LedgerProfListRef* = AristoDbProfListRef - ## Borrowed from `aristo_profile`, only used in profiling mode - - LedgerProfData* = AristoDbProfData - ## Borrowed from `aristo_profile`, only used in profiling mode - - LedgerSpRef* = LedgerSavePoint - ## Object for check point or save point - - LedgerRef* = ref object of RootRef - ## Root object with closures - trackApi*: bool ## For debugging - profTab*: LedgerProfListRef ## Profiling data (if any) - ac*: AccountsLedgerRef - -# End diff --git a/nimbus/db/ledger/base/base_helpers.nim b/nimbus/db/ledger/base/base_helpers.nim deleted file mode 100644 index 7ae7d09d0e..0000000000 --- a/nimbus/db/ledger/base/base_helpers.nim +++ /dev/null @@ -1,43 +0,0 @@ -# Nimbus -# Copyright (c) 2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -import - ../../core_db, - "."/[base_config, base_desc] - -# ------------------------------------------------------------------------------ -# Public constructor helper -# ------------------------------------------------------------------------------ - -when LedgerEnableApiProfiling: - import api_tracking - - proc ldgProfData*(db: CoreDbRef): LedgerProfListRef = - ## Return profiling data table (only available in profiling mode). If - ## available (i.e. non-nil), result data can be organised by the functions - ## available with `aristo_profile`. - ## - ## Note that profiling these data have accumulated over several ledger - ## sessions running on the same `CoreDb` instance. - ## - if db.ledgerHook.isNil: - db.ledgerHook = LedgerProfListRef.init() - cast[LedgerProfListRef](db.ledgerHook) - -proc bless*(ldg: LedgerRef; db: CoreDbRef): LedgerRef = - when LedgerEnableApiTracking: - ldg.trackApi = db.trackLedgerApi - when LedgerEnableApiProfiling: - ldg.profTab = db.ldgProfData() - ldg - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/nimbus/db/ledger/base_iterators.nim b/nimbus/db/ledger/base_iterators.nim deleted file mode 100644 index 25a3173575..0000000000 --- a/nimbus/db/ledger/base_iterators.nim +++ /dev/null @@ -1,71 +0,0 @@ -# Nimbus -# Copyright (c) 2023-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed -# except according to those terms. - -{.push raises: [].} - -import - eth/common, - ../core_db, - ./backend/accounts_ledger, - ./base/[api_tracking, base_config, base_desc] - -when LedgerEnableApiTracking: - import - std/times, - chronicles - logScope: - topics = "ledger" - const - apiTxt = "API" - -# ------------------------------------------------------------------------------ -# Public iterators -# ------------------------------------------------------------------------------ - -iterator accounts*(ldg: LedgerRef): Account = - ldg.beginTrackApi LdgAccountsIt - for w in ldg.ac.accounts(): - yield w - ldg.ifTrackApi: debug apiTxt, api, elapsed - - -iterator addresses*(ldg: LedgerRef): Address = - ldg.beginTrackApi LdgAdressesIt - for w in ldg.ac.addresses(): - yield w - ldg.ifTrackApi: debug apiTxt, api, elapsed - - -iterator cachedStorage*(ldg: LedgerRef, eAddr: Address): (UInt256,UInt256) = - ldg.beginTrackApi LdgCachedStorageIt - for w in ldg.ac.cachedStorage(eAddr): - yield w - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr) - - -iterator pairs*(ldg: LedgerRef): (Address,Account) = - ldg.beginTrackApi LdgPairsIt - for w in ldg.ac.pairs(): - yield w - ldg.ifTrackApi: debug apiTxt, api, elapsed - - -iterator storage*( - ldg: LedgerRef; - eAddr: Address; - ): (UInt256,UInt256) = - ldg.beginTrackApi LdgStorageIt - for w in ldg.ac.storage(eAddr): - yield w - ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr) - -# ------------------------------------------------------------------------------ -# End -# ------------------------------------------------------------------------------ diff --git a/nimbus/db/opts.nim b/nimbus/db/opts.nim index 3e0a15d970..d0c7d8231a 100644 --- a/nimbus/db/opts.nim +++ b/nimbus/db/opts.nim @@ -22,7 +22,7 @@ const ## The row cache is disabled by default as the rdb lru caches do a better ## job at a similar abstraction level - ie they work at the same granularity ## as the rocksdb row cache but with less overhead - defaultBlockCacheSize* = 4 * 1024 * 1024 * 1024 + defaultBlockCacheSize* = 1024 * 1024 * 1024 * 5 div 2 ## The block cache is used to cache indicies, ribbon filters and ## decompressed data, roughly in that priority order. At the time of writing ## we have about 2 giga-entries in the MPT - with the ribbon filter @@ -32,10 +32,8 @@ const ## MPT root computations suffer because of filter evictions and subsequent ## re-reads from file. ## - ## We have two "principal" tables, AriVtx and AriKey - since they live in - ## separate column families, they have separate filters meaning that we need - ## double the total index and ribbon filter size for good performance. - defaultRdbVtxCacheSize* = 512 * 1024 * 1024 + ## A bit of space on top of the filter is left for data block caching + defaultRdbVtxCacheSize* = 768 * 1024 * 1024 ## Cache of branches and leaves in the state MPTs (world and account) defaultRdbKeyCacheSize* = 256 * 1024 * 1024 ## Hashes of the above diff --git a/nimbus/db/storage_types.nim b/nimbus/db/storage_types.nim index e8e29ab0e3..392048c677 100644 --- a/nimbus/db/storage_types.nim +++ b/nimbus/db/storage_types.nim @@ -21,7 +21,7 @@ type canonicalHeadHash = 4 slotHashToSlot = 5 contractHash = 6 - transitionStatus = 7 + dataDirId = 7 safeHash = 8 finalizedHash = 9 beaconState = 10 @@ -59,6 +59,10 @@ func canonicalHeadHashKey*(): DbKey {.inline.} = result.data[0] = byte ord(canonicalHeadHash) result.dataEndPos = 1 +func dataDirIdKey*(): DbKey {.inline.} = + result.data[0] = byte ord(dataDirId) + result.dataEndPos = 1 + func slotHashToSlotKey*(h: openArray[byte]): DbKey {.inline.} = doAssert(h.len == 32) result.data[0] = byte ord(slotHashToSlot) diff --git a/nimbus/evm/code_stream.nim b/nimbus/evm/code_stream.nim index ec8349a178..f6d1b0d2ef 100644 --- a/nimbus/evm/code_stream.nim +++ b/nimbus/evm/code_stream.nim @@ -53,14 +53,19 @@ func readVmWord*(c: var CodeStream, n: static int): UInt256 = func len*(c: CodeStream): int = len(c.code) -func next*(c: var CodeStream): Op {.inline.} = - # The extra >= 0 check helps eliminate `IndexDefect` from the optimized code - # which keeps this hotspot in the EVM small, code-size-wise - if c.pc >= 0 and c.pc < c.code.len: - result = Op(c.code.bytes[c.pc]) - inc c.pc +template next*(c: var CodeStream): Op = + # Retrieve the next opcode (or stop) - this is a hot spot in the interpreter + # and must be kept small for performance + let + # uint: range checked manually -> benefit from smaller codegen + pc = uint(c.pc) + bytes {.cursor.} = c.code.bytes + if pc < uint(bytes.len): + let op = Op(bytes[pc]) + c.pc = cast[int](pc + 1) + op else: - result = Op.Stop + Op.Stop iterator items*(c: var CodeStream): Op = var nextOpcode = c.next() diff --git a/nimbus/evm/computation.nim b/nimbus/evm/computation.nim index d23ca55ff8..c19657e14f 100644 --- a/nimbus/evm/computation.nim +++ b/nimbus/evm/computation.nim @@ -451,9 +451,9 @@ func traceError*(c: Computation) = func prepareTracer*(c: Computation) = c.vmState.capturePrepare(c, c.msg.depth) -func opcodeGasCost*( +template opcodeGasCost*( c: Computation, op: Op, gasCost: static GasInt, tracingEnabled: static bool, - reason: static string): EvmResultVoid {.inline.} = + reason: static string): EvmResultVoid = # Special case of the opcodeGasCost function used for fixed-gas opcodes - since # the parameters are known at compile time, we inline and specialize it when tracingEnabled: @@ -465,16 +465,17 @@ func opcodeGasCost*( c.msg.depth + 1) c.gasMeter.consumeGas(gasCost, reason) -func opcodeGasCost*( +template opcodeGasCost*( c: Computation, op: Op, gasCost: GasInt, reason: static string): EvmResultVoid = + let cost = gasCost if c.vmState.tracingEnabled: c.vmState.captureGasCost( c, op, - gasCost, + cost, c.gasMeter.gasRemaining, c.msg.depth + 1) - c.gasMeter.consumeGas(gasCost, reason) + c.gasMeter.consumeGas(cost, reason) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/evm/interpreter/evmc_gas_costs.nim b/nimbus/evm/interpreter/evmc_gas_costs.nim index d1460391bf..56addde804 100644 --- a/nimbus/evm/interpreter/evmc_gas_costs.nim +++ b/nimbus/evm/interpreter/evmc_gas_costs.nim @@ -57,9 +57,8 @@ func storageCostSpec(): array[EVMFork, StorageCostSpec] {.compileTime.} = netCost: true, warmAccess: WarmStorageReadCost, sset: 20000, reset: 5000 - ColdSloadCost, clear: 4800) - result[FkParis] = result[FkLondon] - result[FkShanghai] = result[FkLondon] - result[FkCancun] = result[FkLondon] + for fork in FkParis..EVMFork.high: + result[fork] = result[FkLondon] proc legacySStoreCost(e: var SstoreCosts, c: StorageCostSpec) {.compileTime.} = diff --git a/nimbus/evm/interpreter/gas_meter.nim b/nimbus/evm/interpreter/gas_meter.nim index 9f6f9586c9..8ae5e649fd 100644 --- a/nimbus/evm/interpreter/gas_meter.nim +++ b/nimbus/evm/interpreter/gas_meter.nim @@ -19,16 +19,17 @@ func init*(m: var GasMeter, startGas: GasInt) = m.gasRemaining = startGas m.gasRefunded = 0 -func consumeGas*( - gasMeter: var GasMeter; amount: GasInt; reason: static string): EvmResultVoid {.inline.} = +template consumeGas*( + gasMeter: var GasMeter; amount: GasInt; reason: static string): EvmResultVoid = # consumeGas is a hotspot in the vm due to it being called for every # instruction # TODO report reason - consumeGas is a hotspot in EVM execution so it has to # be done carefully if amount > gasMeter.gasRemaining: - return err(gasErr(OutOfGas)) - gasMeter.gasRemaining -= amount - ok() + EvmResultVoid.err(gasErr(OutOfGas)) + else: + gasMeter.gasRemaining -= amount + EvmResultVoid.ok() func returnGas*(gasMeter: var GasMeter; amount: GasInt) = gasMeter.gasRemaining += amount diff --git a/nimbus/evm/interpreter/op_dispatcher.nim b/nimbus/evm/interpreter/op_dispatcher.nim index 6aa07da792..9c47872eb2 100644 --- a/nimbus/evm/interpreter/op_dispatcher.nim +++ b/nimbus/evm/interpreter/op_dispatcher.nim @@ -37,6 +37,7 @@ template handleStopDirective(cpt: VmCpt, tracingEnabled: bool) = if not cpt.code.atEnd(): # we only trace `REAL STOP` and ignore `FAKE STOP` cpt.opIndex = cpt.traceOpCodeStarted(Stop) + ?cpt.opcodeGasCost(Stop, 0, tracingEnabled, reason = $Stop) cpt.traceOpCodeEnded(Stop, cpt.opIndex) template handleFixedGasCostsDirective( diff --git a/nimbus/evm/interpreter/op_handlers/oph_call.nim b/nimbus/evm/interpreter/op_handlers/oph_call.nim index 034c64178e..82f4b62f8a 100644 --- a/nimbus/evm/interpreter/op_handlers/oph_call.nim +++ b/nimbus/evm/interpreter/op_handlers/oph_call.nim @@ -203,10 +203,10 @@ else: c.gasMeter.refundGas(child.gasMeter.gasRefunded) c.stack.lsTop(1) - c.returnData = child.output let actualOutputSize = min(memLen, child.output.len) if actualOutputSize > 0: ? c.memory.write(memPos, child.output.toOpenArray(0, actualOutputSize - 1)) + c.returnData = move(child.output) ok() # ------------------------------------------------------------------------------ diff --git a/nimbus/evm/interpreter/op_handlers/oph_create.nim b/nimbus/evm/interpreter/op_handlers/oph_create.nim index 515fd3d724..6f051849df 100644 --- a/nimbus/evm/interpreter/op_handlers/oph_create.nim +++ b/nimbus/evm/interpreter/op_handlers/oph_create.nim @@ -78,7 +78,7 @@ else: c.stack.lsTop child.msg.contractAddress elif not child.error.burnsGas: # Means return was `REVERT`. # From create, only use `outputData` if child returned with `REVERT`. - c.returnData = child.output + c.returnData = move(child.output) ok() diff --git a/nimbus/evm/precompiles.nim b/nimbus/evm/precompiles.nim index a1d6b49b82..3bececfb8f 100644 --- a/nimbus/evm/precompiles.nim +++ b/nimbus/evm/precompiles.nim @@ -98,7 +98,7 @@ func getSignature(c: Computation): EvmResult[SigRes] = # used for R and S if maxPos >= 64: # Copy message data to buffer - bytes[0..(maxPos-64)] = data[64..maxPos] + assign(bytes.toOpenArray(0, (maxPos-64)), data.toOpenArray(64, maxPos)) let sig = Signature.fromRaw(bytes).valueOr: return err(prcErr(PrcInvalidSig)) @@ -301,18 +301,18 @@ func bn256ecAdd(c: Computation, fork: EVMFork = FkByzantium): EvmResultVoid = var input: array[128, byte] - output: array[64, byte] # Padding data let len = min(c.msg.data.len, 128) - 1 input[0..len] = c.msg.data[0..len] var p1 = ? G1.getPoint(input.toOpenArray(0, 63)) var p2 = ? G1.getPoint(input.toOpenArray(64, 127)) var apo = (p1 + p2).toAffine() + + c.output.setLen(64) if isSome(apo): # we can discard here because we supply proper buffer - discard apo.get().toBytes(output) + discard apo.get().toBytes(c.output) - assign(c.output, output) ok() func bn256ecMul(c: Computation, fork: EVMFork = FkByzantium): EvmResultVoid = @@ -321,19 +321,19 @@ func bn256ecMul(c: Computation, fork: EVMFork = FkByzantium): EvmResultVoid = var input: array[96, byte] - output: array[64, byte] # Padding data let len = min(c.msg.data.len, 96) - 1 - input[0..len] = c.msg.data[0..len] + assign(input.toOpenArray(0, len), c.msg.data.toOpenArray(0, len)) var p1 = ? G1.getPoint(input.toOpenArray(0, 63)) var fr = ? getFR(input.toOpenArray(64, 95)) var apo = (p1 * fr).toAffine() + + c.output.setLen(64) if isSome(apo): # we can discard here because we supply buffer of proper size - discard apo.get().toBytes(output) + discard apo.get().toBytes(c.output) - assign(c.output, output) ok() func bn256ecPairing(c: Computation, fork: EVMFork = FkByzantium): EvmResultVoid = @@ -348,10 +348,10 @@ func bn256ecPairing(c: Computation, fork: EVMFork = FkByzantium): EvmResultVoid GasECPairingBaseIstanbul + numPoints * GasECPairingPerPointIstanbul ? c.gasMeter.consumeGas(gasFee, reason="ecPairing Precompile") - var output: array[32, byte] + c.output.setLen(32) if msglen == 0: # we can discard here because we supply buffer of proper size - discard BNU256.one().toBytes(output) + discard BNU256.one().toBytes(c.output) else: # Calculate number of pairing pairs let count = msglen div 192 @@ -369,9 +369,8 @@ func bn256ecPairing(c: Computation, fork: EVMFork = FkByzantium): EvmResultVoid if acc == FQ12.one(): # we can discard here because we supply buffer of proper size - discard BNU256.one().toBytes(output) + discard BNU256.one().toBytes(c.output) - assign(c.output, output) ok() func blake2bf(c: Computation): EvmResultVoid = @@ -382,11 +381,9 @@ func blake2bf(c: Computation): EvmResultVoid = let gasFee = GasInt(beLoad32(input, 0)) ? c.gasMeter.consumeGas(gasFee, reason="blake2bf Precompile") - var output: array[64, byte] - if not blake2b_F(input, output): + c.output.setLen(64) + if not blake2b_F(input, c.output): return err(prcErr(PrcInvalidParam)) - else: - assign(c.output, output) ok() func blsG1Add(c: Computation): EvmResultVoid = @@ -407,7 +404,7 @@ func blsG1Add(c: Computation): EvmResultVoid = a.add b - c.output = newSeq[byte](128) + c.output.setLen(128) if not encodePoint(a, c.output): return err(prcErr(PrcInvalidPoint)) ok() @@ -431,7 +428,7 @@ func blsG1Mul(c: Computation): EvmResultVoid = a.mul(scalar) - c.output = newSeq[byte](128) + c.output.setLen(128) if not encodePoint(a, c.output): return err(prcErr(PrcInvalidPoint)) ok() @@ -504,7 +501,7 @@ func blsG1MultiExp(c: Computation): EvmResultVoid = else: acc.add(p) - c.output = newSeq[byte](128) + c.output.setLen(128) if not encodePoint(acc, c.output): return err(prcErr(PrcInvalidPoint)) ok() @@ -527,7 +524,7 @@ func blsG2Add(c: Computation): EvmResultVoid = a.add b - c.output = newSeq[byte](256) + c.output.setLen(256) if not encodePoint(a, c.output): return err(prcErr(PrcInvalidPoint)) ok() @@ -551,7 +548,7 @@ func blsG2Mul(c: Computation): EvmResultVoid = a.mul(scalar) - c.output = newSeq[byte](256) + c.output.setLen(256) if not encodePoint(a, c.output): return err(prcErr(PrcInvalidPoint)) ok() @@ -593,7 +590,7 @@ func blsG2MultiExp(c: Computation): EvmResultVoid = else: acc.add(p) - c.output = newSeq[byte](256) + c.output.setLen(256) if not encodePoint(acc, c.output): return err(prcErr(PrcInvalidPoint)) ok() @@ -643,7 +640,7 @@ func blsPairing(c: Computation): EvmResultVoid = else: acc.mul(millerLoop(g1, g2)) - c.output = newSeq[byte](32) + c.output.setLen(32) if acc.check(): c.output[^1] = 1.byte ok() @@ -663,7 +660,7 @@ func blsMapG1(c: Computation): EvmResultVoid = let p = fe.mapFPToG1() - c.output = newSeq[byte](128) + c.output.setLen(128) if not encodePoint(p, c.output): return err(prcErr(PrcInvalidPoint)) ok() @@ -683,7 +680,7 @@ func blsMapG2(c: Computation): EvmResultVoid = let p = fe.mapFPToG2() - c.output = newSeq[byte](256) + c.output.setLen(256) if not encodePoint(p, c.output): return err(prcErr(PrcInvalidPoint)) ok() diff --git a/nimbus/evm/state.nim b/nimbus/evm/state.nim index 192e8b3f44..4617bea963 100644 --- a/nimbus/evm/state.nim +++ b/nimbus/evm/state.nim @@ -110,7 +110,7 @@ proc reinit*(self: BaseVMState; ## Object descriptor com = self.com db = com.db ac = if linear or self.stateDB.getStateRoot() == parent.stateRoot: self.stateDB - else: LedgerRef.init(db, self.stateDB.ac.storeSlotHash) + else: LedgerRef.init(db, self.stateDB.storeSlotHash) flags = self.flags self[].reset self.init( @@ -146,12 +146,12 @@ proc reinit*(self: BaseVMState; ## Object descriptor ## This is a variant of the `reinit()` function above where the field ## `header.parentHash`, is used to fetch the `parent` Header to be ## used in the `update()` variant, above. - var parent: Header - self.com.db.getBlockHeader(header.parentHash, parent) and - self.reinit( - parent = parent, - header = header, - linear = false) + let parent = self.com.db.getBlockHeader(header.parentHash).valueOr: + return false + self.reinit( + parent = parent, + header = header, + linear = false) proc init*( self: BaseVMState; ## Object descriptor @@ -204,16 +204,15 @@ proc new*( ## This is a variant of the `new()` constructor above where the field ## `header.parentHash`, is used to fetch the `parent` Header to be ## used in the `new()` variant, above. - var parent: Header - if com.db.getBlockHeader(header.parentHash, parent): - ok(BaseVMState.new( + let parent = com.db.getBlockHeader(header.parentHash).valueOr: + return err(evmErr(EvmHeaderNotFound)) + + ok(BaseVMState.new( parent = parent, header = header, com = com, tracer = tracer, storeSlotHash = storeSlotHash)) - else: - err(evmErr(EvmHeaderNotFound)) proc init*( vmState: BaseVMState; @@ -223,15 +222,15 @@ proc init*( storeSlotHash = false): bool = ## Variant of `new()` which does not throw an exception on a dangling ## `Header` parent hash reference. - var parent: Header - if com.db.getBlockHeader(header.parentHash, parent): - vmState.init( - parent = parent, - header = header, - com = com, - tracer = tracer, - storeSlotHash = storeSlotHash) - return true + let parent = com.db.getBlockHeader(header.parentHash).valueOr: + return false + vmState.init( + parent = parent, + header = header, + com = com, + tracer = tracer, + storeSlotHash = storeSlotHash) + return true func coinbase*(vmState: BaseVMState): Address = vmState.blockCtx.coinbase @@ -261,14 +260,9 @@ func baseFeePerGas*(vmState: BaseVMState): UInt256 = method getAncestorHash*( vmState: BaseVMState, blockNumber: BlockNumber): Hash32 {.gcsafe, base.} = let db = vmState.com.db - try: - var blockHash: Hash32 - if db.getBlockHash(blockNumber, blockHash): - blockHash - else: - default(Hash32) - except RlpError: - default(Hash32) + let blockHash = db.getBlockHash(blockNumber).valueOr: + return default(Hash32) + blockHash proc readOnlyStateDB*(vmState: BaseVMState): ReadOnlyStateDB {.inline.} = ReadOnlyStateDB(vmState.stateDB) diff --git a/nimbus/graphql/ethapi.nim b/nimbus/graphql/ethapi.nim index 3401e052c6..ef9053f6aa 100644 --- a/nimbus/graphql/ethapi.nim +++ b/nimbus/graphql/ethapi.nim @@ -154,39 +154,33 @@ proc getStateDB(com: CommonRef, header: Header): LedgerRef {.deprecated: "Ledge proc getBlockByNumber(ctx: GraphqlContextRef, number: Node): RespResult = try: - ok(headerNode(ctx, getBlockHeader(ctx.chainDB, toBlockNumber(number)))) - except CatchableError as e: - err(e.msg) + let header = ?ctx.chainDB.getBlockHeader(toBlockNumber(number)) + ok(headerNode(ctx, header)) + except ValueError as exc: + err(exc.msg) proc getBlockByNumber(ctx: GraphqlContextRef, number: base.BlockNumber): RespResult = - try: - ok(headerNode(ctx, getBlockHeader(ctx.chainDB, number))) - except CatchableError as e: - err(e.msg) + let header = ?ctx.chainDB.getBlockHeader(number) + ok(headerNode(ctx, header)) proc getBlockByHash(ctx: GraphqlContextRef, hash: Node): RespResult = try: - ok(headerNode(ctx, getBlockHeader(ctx.chainDB, toHash(hash)))) - except CatchableError as e: - err(e.msg) + let header = ?ctx.chainDB.getBlockHeader(toHash(hash)) + ok(headerNode(ctx, header)) + except ValueError as exc: + err(exc.msg) proc getBlockByHash(ctx: GraphqlContextRef, hash: Hash32): RespResult = - try: - ok(headerNode(ctx, getBlockHeader(ctx.chainDB, hash))) - except CatchableError as e: - err(e.msg) + let header = ?ctx.chainDB.getBlockHeader(hash) + ok(headerNode(ctx, header)) proc getLatestBlock(ctx: GraphqlContextRef): RespResult = - try: - ok(headerNode(ctx, getCanonicalHead(ctx.chainDB))) - except CatchableError as e: - err("can't get latest block: " & e.msg) + let header = ?ctx.chainDB.getCanonicalHead() + ok(headerNode(ctx, header)) proc getTxCount(ctx: GraphqlContextRef, txRoot: Hash32): RespResult = - try: - ok(resp(getTransactionCount(ctx.chainDB, txRoot))) - except CatchableError as e: - err("can't get txcount: " & e.msg) + let txCount = ctx.chainDB.getTransactionCount(txRoot) + ok(resp(txCount)) proc longNode(val: uint64 | int64): RespResult = ok(Node(kind: nkInt, intVal: $val, pos: Pos())) @@ -244,106 +238,85 @@ proc getTotalDifficulty(ctx: GraphqlContextRef, blockHash: Hash32): RespResult = bigIntNode(score) proc getOmmerCount(ctx: GraphqlContextRef, ommersHash: Hash32): RespResult = - try: - ok(resp(getUnclesCount(ctx.chainDB, ommersHash))) - except CatchableError as e: - err("can't get ommers count: " & e.msg) + let ommers = ?ctx.chainDB.getUnclesCount(ommersHash) + ok(resp(ommers)) proc getOmmers(ctx: GraphqlContextRef, ommersHash: Hash32): RespResult = - try: - let uncles = getUncles(ctx.chainDB, ommersHash) - when false: - # EIP 1767 says no ommers == null - # but hive test case want empty array [] - if uncles.len == 0: - return ok(respNull()) - var list = respList() - for n in uncles: - list.add headerNode(ctx, n) - ok(list) - except CatchableError as e: - err("can't get ommers: " & e.msg) - -proc getOmmerAt(ctx: GraphqlContextRef, ommersHash: Hash32, index: int): RespResult = - try: - let uncles = getUncles(ctx.chainDB, ommersHash) + let uncles = ?ctx.chainDB.getUncles(ommersHash) + when false: + # EIP 1767 says no ommers == null + # but hive test case want empty array [] if uncles.len == 0: return ok(respNull()) - if index < 0 or index >= uncles.len: - return ok(respNull()) - ok(headerNode(ctx, uncles[index])) - except CatchableError as e: - err("can't get ommer: " & e.msg) + var list = respList() + for n in uncles: + list.add headerNode(ctx, n) + ok(list) + +proc getOmmerAt(ctx: GraphqlContextRef, ommersHash: Hash32, index: int): RespResult = + let uncles = ?ctx.chainDB.getUncles(ommersHash) + if uncles.len == 0: + return ok(respNull()) + if index < 0 or index >= uncles.len: + return ok(respNull()) + ok(headerNode(ctx, uncles[index])) proc getTxs(ctx: GraphqlContextRef, header: Header): RespResult = - try: - let txCount = getTransactionCount(ctx.chainDB, header.txRoot) - if txCount == 0: - return ok(respNull()) - var list = respList() - var index = 0'u64 - for n in getBlockTransactionData(ctx.chainDB, header.txRoot): - let tx = decodeTx(n) - list.add txNode(ctx, tx, index, header.number, header.baseFeePerGas) - inc index - - index = 0'u64 - var prevUsed = 0.GasInt - for r in getReceipts(ctx.chainDB, header.receiptsRoot): - let tx = TxNode(list.sons[index]) - tx.receipt = r - tx.gasUsed = r.cumulativeGasUsed - prevUsed - prevUsed = r.cumulativeGasUsed - inc index + let txCount = getTransactionCount(ctx.chainDB, header.txRoot) + if txCount == 0: + return ok(respNull()) + var list = respList() + var index = 0'u64 + + let txList = ?ctx.chainDB.getTransactions(header.txRoot) + for tx in txList: + list.add txNode(ctx, tx, index, header.number, header.baseFeePerGas) + inc index + + index = 0'u64 + var prevUsed = 0.GasInt + let receiptList = ?ctx.chainDB.getReceipts(header.receiptsRoot) + for r in receiptList: + let tx = TxNode(list.sons[index]) + tx.receipt = r + tx.gasUsed = r.cumulativeGasUsed - prevUsed + prevUsed = r.cumulativeGasUsed + inc index - ok(list) - except CatchableError as e: - err("can't get transactions: " & e.msg) + ok(list) proc getWithdrawals(ctx: GraphqlContextRef, header: Header): RespResult = - try: - if header.withdrawalsRoot.isSome: - let wds = getWithdrawals(ctx.chainDB, header.withdrawalsRoot.get) - var list = respList() - for wd in wds: - list.add wdNode(ctx, wd) - ok(list) - else: - ok(respNull()) - except CatchableError as e: - err("can't get transactions: " & e.msg) + if header.withdrawalsRoot.isNone: + return ok(respNull()) + + let wds = ?ctx.chainDB.getWithdrawals(header.withdrawalsRoot.get) + var list = respList() + for wd in wds: + list.add wdNode(ctx, wd) + ok(list) proc getTxAt(ctx: GraphqlContextRef, header: Header, index: uint64): RespResult = - try: - var tx: Transaction - if getTransactionByIndex(ctx.chainDB, header.txRoot, index.uint16, tx): - let txn = txNode(ctx, tx, index, header.number, header.baseFeePerGas) - - var i = 0'u64 - var prevUsed = 0.GasInt - for r in getReceipts(ctx.chainDB, header.receiptsRoot): - if i == index: - let tx = TxNode(txn) - tx.receipt = r - tx.gasUsed = r.cumulativeGasUsed - prevUsed - prevUsed = r.cumulativeGasUsed - inc i - - ok(txn) - else: - ok(respNull()) - except CatchableError as exc: - err("can't get transaction by index '" & $index & "': " & exc.msg) - except RlpError as exc: - err("can't get transaction by index '" & $index & "': " & exc.msg) + let tx = ctx.chainDB.getTransactionByIndex(header.txRoot, index.uint16).valueOr: + return ok(respNull()) + + let txn = txNode(ctx, tx, index, header.number, header.baseFeePerGas) + var i = 0'u64 + var prevUsed = 0.GasInt + let receiptList = ?ctx.chainDB.getReceipts(header.receiptsRoot) + for r in receiptList: + if i == index: + let tx = TxNode(txn) + tx.receipt = r + tx.gasUsed = r.cumulativeGasUsed - prevUsed + prevUsed = r.cumulativeGasUsed + inc i + ok(txn) proc getTxByHash(ctx: GraphqlContextRef, hash: Hash32): RespResult = - try: - let (blockNumber, index) = getTransactionKey(ctx.chainDB, hash) - let header = getBlockHeader(ctx.chainDB, blockNumber) - getTxAt(ctx, header, index) - except CatchableError as e: - err("can't get transaction by hash '" & hash.data.toHex & "': $2" & e.msg) + let + txKey = ?ctx.chainDB.getTransactionKey(hash) + header = ?ctx.chainDB.getBlockHeader(txKey.blockNumber) + getTxAt(ctx, header, txKey.index) proc accountNode(ctx: GraphqlContextRef, header: Header, address: Address): RespResult = try: @@ -783,14 +756,14 @@ proc txAccessList(ud: RootRef, params: Args, parent: Node): RespResult {.apiPrag proc txMaxFeePerBlobGas(ud: RootRef, params: Args, parent: Node): RespResult {.apiPragma.} = let tx = TxNode(parent) - if tx.tx.txType < TxEIP4844: + if tx.tx.txType < TxEip4844: ok(respNull()) else: longNode(tx.tx.maxFeePerBlobGas) proc txVersionedHashes(ud: RootRef, params: Args, parent: Node): RespResult {.apiPragma.} = let tx = TxNode(parent) - if tx.tx.txType < TxEIP4844: + if tx.tx.txType < TxEip4844: ok(respNull()) else: var list = respList() diff --git a/nimbus/nimbus_execution_client.nim b/nimbus/nimbus_execution_client.nim index 289a3a24a3..342b9fbc50 100644 --- a/nimbus/nimbus_execution_client.nim +++ b/nimbus/nimbus_execution_client.nim @@ -17,6 +17,7 @@ import metrics, metrics/chronicles_support, kzg4844/kzg, + stew/byteutils, ./rpc, ./version, ./constants, @@ -24,7 +25,9 @@ import ./nimbus_import, ./core/eip4844, ./db/core_db/persistent, - ./sync/handlers + ./db/storage_types, + ./sync/handlers, + ./common/chain_config_hash from beacon_chain/nimbus_binary_common import setupFileLimits @@ -63,7 +66,7 @@ proc manageAccounts(nimbus: NimbusNode, conf: NimbusConf) = quit(QuitFailure) proc setupP2P(nimbus: NimbusNode, conf: NimbusConf, - com: CommonRef, protocols: set[ProtocolFlag]) = + com: CommonRef) = ## Creating P2P Server let kpres = nimbus.ctx.getNetKeys(conf.netKey, conf.dataDir.string) if kpres.isErr: @@ -104,19 +107,9 @@ proc setupP2P(nimbus: NimbusNode, conf: NimbusConf, bindIp = conf.listenAddress, rng = nimbus.ctx.rng) - # Add protocol capabilities based on protocol flags - for w in protocols: - case w: # handle all possibilities - of ProtocolFlag.Eth: - nimbus.ethNode.addEthHandlerCapability( - nimbus.ethNode.peerPool, - nimbus.chainRef, - nimbus.txPool) - # Cannot do without minimal `eth` capability - if ProtocolFlag.Eth notin protocols: - nimbus.ethNode.addEthHandlerCapability( - nimbus.ethNode.peerPool, - nimbus.chainRef) + # Add protocol capabilities + nimbus.ethNode.addEthHandlerCapability( + nimbus.ethNode.peerPool, nimbus.chainRef, nimbus.txPool) # Always initialise beacon syncer nimbus.beaconSyncRef = BeaconSyncRef.init( @@ -163,6 +156,24 @@ proc setupMetrics(nimbus: NimbusNode, conf: NimbusConf) = nimbus.metricsServer = res.get waitFor nimbus.metricsServer.start() +proc preventLoadingDataDirForTheWrongNetwork(db: CoreDbRef; conf: NimbusConf) = + let + kvt = db.ctx.getKvt() + calculatedId = calcHash(conf.networkId, conf.networkParams) + dataDirIdBytes = kvt.get(dataDirIdKey().toOpenArray).valueOr: + # an empty database + info "Writing data dir ID", ID=calculatedId + kvt.put(dataDirIdKey().toOpenArray, calculatedId.data).isOkOr: + fatal "Cannot write data dir ID", ID=calculatedId + quit(QuitFailure) + return + + if calculatedId.data != dataDirIdBytes: + fatal "Data dir already initialized with other network configuration", + get=dataDirIdBytes.toHex, + expected=calculatedId + quit(QuitFailure) + proc run(nimbus: NimbusNode, conf: NimbusConf) = ## logging setLogLevel(conf.logLevel) @@ -202,6 +213,7 @@ proc run(nimbus: NimbusNode, conf: NimbusConf) = string conf.dataDir, conf.dbOptions(noKeyCache = conf.cmd == NimbusCmd.`import`)) + preventLoadingDataDirForTheWrongNetwork(coreDB, conf) setupMetrics(nimbus, conf) let com = CommonRef.new( @@ -210,6 +222,13 @@ proc run(nimbus: NimbusNode, conf: NimbusConf) = networkId = conf.networkId, params = conf.networkParams) + if conf.extraData.len > 32: + warn "ExtraData exceeds 32 bytes limit, truncate", + extraData=conf.extraData, + len=conf.extraData.len + + com.extraData = conf.extraData + defer: com.db.finish() @@ -217,18 +236,15 @@ proc run(nimbus: NimbusNode, conf: NimbusConf) = of NimbusCmd.`import`: importBlocks(conf, com) else: - let protocols = conf.getProtocolFlags() - basicServices(nimbus, conf, com) manageAccounts(nimbus, conf) - setupP2P(nimbus, conf, com, protocols) - setupRpc(nimbus, conf, com, protocols) + setupP2P(nimbus, conf, com) + setupRpc(nimbus, conf, com) - if conf.maxPeers > 0: + if conf.maxPeers > 0 and conf.engineApiServerEnabled(): # Not starting syncer if there is definitely no way to run it. This # avoids polling (i.e. waiting for instructions) and some logging. - let resumeOnly = not conf.engineApiServerEnabled() - if not nimbus.beaconSyncRef.start(resumeOnly): + if not nimbus.beaconSyncRef.start(): nimbus.beaconSyncRef = BeaconSyncRef(nil) if nimbus.state == NimbusState.Starting: diff --git a/nimbus/rpc.nim b/nimbus/rpc.nim index b2ac823232..c329b693ed 100644 --- a/nimbus/rpc.nim +++ b/nimbus/rpc.nim @@ -53,7 +53,7 @@ func installRPC(server: RpcServer, setupCommonRpc(nimbus.ethNode, conf, server) if RpcFlag.Eth in flags: - setupServerAPI(serverApi, server) + setupServerAPI(serverApi, server, nimbus.ctx) # # Tracer is currently disabled # if RpcFlag.Debug in flags: @@ -140,7 +140,6 @@ func addHandler(handlers: var seq[RpcHandlerProc], proc addHttpServices(handlers: var seq[RpcHandlerProc], nimbus: NimbusNode, conf: NimbusConf, com: CommonRef, serverApi: ServerAPIRef, - protocols: set[ProtocolFlag], address: TransportAddress) = # The order is important: graphql, ws, rpc @@ -156,16 +155,14 @@ proc addHttpServices(handlers: var seq[RpcHandlerProc], if conf.wsEnabled: let server = newRpcWebsocketHandler() - var rpcFlags = conf.getWsFlags() - if ProtocolFlag.Eth in protocols: rpcFlags.incl RpcFlag.Eth + let rpcFlags = conf.getWsFlags() + {RpcFlag.Eth} installRPC(server, nimbus, conf, com, serverApi, rpcFlags) handlers.addHandler(server) info "JSON-RPC WebSocket API enabled", url = "ws://" & $address if conf.rpcEnabled: let server = newRpcHttpHandler() - var rpcFlags = conf.getRpcFlags() - if ProtocolFlag.Eth in protocols: rpcFlags.incl RpcFlag.Eth + let rpcFlags = conf.getRpcFlags() + {RpcFlag.Eth} installRPC(server, nimbus, conf, com, serverApi, rpcFlags) handlers.addHandler(server) info "JSON-RPC API enabled", url = "http://" & $address @@ -193,7 +190,7 @@ proc addEngineApiServices(handlers: var seq[RpcHandlerProc], proc addServices(handlers: var seq[RpcHandlerProc], nimbus: NimbusNode, conf: NimbusConf, - com: CommonRef, serverApi: ServerAPIRef, protocols: set[ProtocolFlag], + com: CommonRef, serverApi: ServerAPIRef, address: TransportAddress) = # The order is important: graphql, ws, rpc @@ -215,8 +212,7 @@ proc addServices(handlers: var seq[RpcHandlerProc], info "Engine WebSocket API enabled", url = "ws://" & $address if conf.wsEnabled: - var rpcFlags = conf.getWsFlags() - if ProtocolFlag.Eth in protocols: rpcFlags.incl RpcFlag.Eth + let rpcFlags = conf.getWsFlags() + {RpcFlag.Eth} installRPC(server, nimbus, conf, com, serverApi, rpcFlags) info "JSON-RPC WebSocket API enabled", url = "ws://" & $address @@ -232,8 +228,7 @@ proc addServices(handlers: var seq[RpcHandlerProc], info "Engine API enabled", url = "http://" & $address if conf.rpcEnabled: - var rpcFlags = conf.getRpcFlags() - if ProtocolFlag.Eth in protocols: rpcFlags.incl RpcFlag.Eth + let rpcFlags = conf.getRpcFlags() + {RpcFlag.Eth} installRPC(server, nimbus, conf, com, serverApi, rpcFlags) info "JSON-RPC API enabled", url = "http://" & $address @@ -241,7 +236,7 @@ proc addServices(handlers: var seq[RpcHandlerProc], handlers.addHandler(server) proc setupRpc*(nimbus: NimbusNode, conf: NimbusConf, - com: CommonRef, protocols: set[ProtocolFlag]) = + com: CommonRef) = if not conf.engineApiEnabled: warn "Engine API disabled, the node will not respond to consensus client updates (enable with `--engine-api`)" @@ -268,7 +263,7 @@ proc setupRpc*(nimbus: NimbusNode, conf: NimbusConf, let hooks: seq[RpcAuthHook] = @[jwtAuthHook, corsHook] var handlers: seq[RpcHandlerProc] let address = initTAddress(conf.httpAddress, conf.httpPort) - handlers.addServices(nimbus, conf, com, serverApi, protocols, address) + handlers.addServices(nimbus, conf, com, serverApi, address) let res = newHttpServerWithParams(address, hooks, handlers) if res.isErr: fatal "Cannot create RPC server", msg=res.error @@ -281,7 +276,7 @@ proc setupRpc*(nimbus: NimbusNode, conf: NimbusConf, let hooks = @[corsHook] var handlers: seq[RpcHandlerProc] let address = initTAddress(conf.httpAddress, conf.httpPort) - handlers.addHttpServices(nimbus, conf, com, serverApi, protocols, address) + handlers.addHttpServices(nimbus, conf, com, serverApi, address) let res = newHttpServerWithParams(address, hooks, handlers) if res.isErr: fatal "Cannot create RPC server", msg=res.error diff --git a/nimbus/rpc/p2p.nim b/nimbus/rpc/p2p.nim deleted file mode 100644 index 7588787a7d..0000000000 --- a/nimbus/rpc/p2p.nim +++ /dev/null @@ -1,607 +0,0 @@ -# Nimbus -# Copyright (c) 2018-2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) -# * MIT license ([LICENSE-MIT](LICENSE-MIT)) -# at your option. -# This file may not be copied, modified, or distributed except according to -# those terms. - -{.push raises: [].} - -import - std/[sequtils, times, tables, typetraits], - json_rpc/rpcserver, - stint, - stew/byteutils, - json_serialization, - web3/conversions, - json_serialization/stew/results, - eth/common/eth_types_json_serialization, - eth/[rlp, p2p], - ".."/[transaction, evm/state, constants], - ../db/ledger, - ./rpc_types, ./rpc_utils, ./oracle, - ../transaction/call_evm, - ../core/tx_pool, - ../core/eip4844, - ../common/[common, context], - ../utils/utils, - ../beacon/web3_eth_conv, - ../evm/evm_errors, - ./filters - -type - Header = eth_types.Header - Hash32 = eth_types.Hash32 - -proc getProof*( - accDB: LedgerRef, - address: eth_types.Address, - slots: seq[UInt256]): ProofResponse = - let - acc = accDB.getEthAccount(address) - accExists = accDB.accountExists(address) - accountProof = accDB.getAccountProof(address) - slotProofs = accDB.getStorageProof(address, slots) - - var storage = newSeqOfCap[StorageProof](slots.len) - - for i, slotKey in slots: - let slotValue = accDB.getStorage(address, slotKey) - storage.add(StorageProof( - key: slotKey, - value: slotValue, - proof: seq[RlpEncodedBytes](slotProofs[i]))) - - if accExists: - ProofResponse( - address: address, - accountProof: seq[RlpEncodedBytes](accountProof), - balance: acc.balance, - nonce: w3Qty(acc.nonce), - codeHash: acc.codeHash, - storageHash: acc.storageRoot, - storageProof: storage) - else: - ProofResponse( - address: address, - accountProof: seq[RlpEncodedBytes](accountProof), - storageProof: storage) - -proc setupEthRpc*( - node: EthereumNode, ctx: EthContext, com: CommonRef, - txPool: TxPoolRef, oracle: Oracle, server: RpcServer) = - - let chainDB = com.db - proc getStateDB(header:Header): LedgerRef = - ## Retrieves the account db from canonical head - # we don't use accounst_cache here because it's only read operations - LedgerRef.init(chainDB) - - proc stateDBFromTag(quantityTag: BlockTag, readOnly = true): LedgerRef - {.gcsafe, raises: [CatchableError].} = - getStateDB(chainDB.headerFromTag(quantityTag)) - - server.rpc("eth_chainId") do() -> Web3Quantity: - return w3Qty(distinctBase(com.chainId)) - - server.rpc("eth_syncing") do() -> SyncingStatus: - ## Returns SyncObject or false when not syncing. - if com.syncState != Waiting: - let sync = SyncObject( - startingBlock: w3Qty com.syncStart, - currentBlock : w3Qty com.syncCurrent, - highestBlock : w3Qty com.syncHighest - ) - return SyncingStatus(syncing: true, syncObject: sync) - else: - return SyncingStatus(syncing: false) - - server.rpc("eth_gasPrice") do() -> Web3Quantity: - ## Returns an integer of the current gas price in wei. - w3Qty(calculateMedianGasPrice(chainDB).uint64) - - server.rpc("eth_accounts") do() -> seq[eth_types.Address]: - ## Returns a list of addresses owned by client. - result = newSeqOfCap[eth_types.Address](ctx.am.numAccounts) - for k in ctx.am.addresses: - result.add k - - server.rpc("eth_blockNumber") do() -> Web3Quantity: - ## Returns integer of the current block number the client is on. - w3Qty(chainDB.getCanonicalHead().number) - - server.rpc("eth_getBalance") do(data: eth_types.Address, quantityTag: BlockTag) -> UInt256: - ## Returns the balance of the account of given address. - ## - ## data: address to check for balance. - ## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter. - ## Returns integer of the current balance in wei. - let - accDB = stateDBFromTag(quantityTag) - address = data - accDB.getBalance(address) - - server.rpc("eth_getStorageAt") do(data: eth_types.Address, slot: UInt256, quantityTag: BlockTag) -> eth_types.FixedBytes[32]: - ## Returns the value from a storage position at a given address. - ## - ## data: address of the storage. - ## slot: integer of the position in the storage. - ## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter. - ## Returns: the value at this storage position. - let - accDB = stateDBFromTag(quantityTag) - address = data - data = accDB.getStorage(address, slot) - data.to(Bytes32) - - server.rpc("eth_getTransactionCount") do(data: eth_types.Address, quantityTag: BlockTag) -> Web3Quantity: - ## Returns the number of transactions sent from an address. - ## - ## data: address. - ## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter. - ## Returns integer of the number of transactions send from this address. - let - address = data - accDB = stateDBFromTag(quantityTag) - w3Qty(accDB.getNonce(address)) - - server.rpc("eth_getBlockTransactionCountByHash") do(data: Hash32) -> Web3Quantity: - ## Returns the number of transactions in a block from a block matching the given block hash. - ## - ## data: hash of a block - ## Returns integer of the number of transactions in this block. - let - blockHash = data - header = chainDB.getBlockHeader(blockHash) - txCount = chainDB.getTransactionCount(header.txRoot) - Web3Quantity(txCount) - - server.rpc("eth_getBlockTransactionCountByNumber") do(quantityTag: BlockTag) -> Web3Quantity: - ## Returns the number of transactions in a block matching the given block number. - ## - ## data: integer of a block number, or the string "earliest", "latest" or "pending", as in the default block parameter. - ## Returns integer of the number of transactions in this block. - let - header = chainDB.headerFromTag(quantityTag) - txCount = chainDB.getTransactionCount(header.txRoot) - Web3Quantity(txCount) - - server.rpc("eth_getUncleCountByBlockHash") do(data: Hash32) -> Web3Quantity: - ## Returns the number of uncles in a block from a block matching the given block hash. - ## - ## data: hash of a block. - ## Returns integer of the number of uncles in this block. - let - blockHash = data - header = chainDB.getBlockHeader(blockHash) - unclesCount = chainDB.getUnclesCount(header.ommersHash) - Web3Quantity(unclesCount) - - server.rpc("eth_getUncleCountByBlockNumber") do(quantityTag: BlockTag) -> Web3Quantity: - ## Returns the number of uncles in a block from a block matching the given block number. - ## - ## quantityTag: integer of a block number, or the string "latest", "earliest" or "pending", see the default block parameter. - ## Returns integer of uncles in this block. - let - header = chainDB.headerFromTag(quantityTag) - unclesCount = chainDB.getUnclesCount(header.ommersHash) - Web3Quantity(unclesCount) - - server.rpc("eth_getCode") do(data: eth_types.Address, quantityTag: BlockTag) -> seq[byte]: - ## Returns code at a given address. - ## - ## data: address - ## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter. - ## Returns the code from the given address. - let - accDB = stateDBFromTag(quantityTag) - address = data - accDB.getCode(address).bytes() - - template sign(privateKey: PrivateKey, message: string): seq[byte] = - # message length encoded as ASCII representation of decimal - let msgData = "\x19Ethereum Signed Message:\n" & $message.len & message - @(sign(privateKey, msgData.toBytes()).toRaw()) - - server.rpc("eth_sign") do(data: eth_types.Address, message: seq[byte]) -> seq[byte]: - ## The sign method calculates an Ethereum specific signature with: sign(keccak256("\x19Ethereum Signed Message:\n" + len(message) + message))). - ## By adding a prefix to the message makes the calculated signature recognisable as an Ethereum specific signature. - ## This prevents misuse where a malicious DApp can sign arbitrary data (e.g. transaction) and use the signature to impersonate the victim. - ## Note the address to sign with must be unlocked. - ## - ## data: address. - ## message: message to sign. - ## Returns signature. - let - address = data - acc = ctx.am.getAccount(address).tryGet() - - if not acc.unlocked: - raise newException(ValueError, "Account locked, please unlock it first") - sign(acc.privateKey, cast[string](message)) - - server.rpc("eth_signTransaction") do(data: TransactionArgs) -> seq[byte]: - ## Signs a transaction that can be submitted to the network at a later time using with - ## eth_sendRawTransaction - let - address = data.`from`.get() - acc = ctx.am.getAccount(address).tryGet() - - if not acc.unlocked: - raise newException(ValueError, "Account locked, please unlock it first") - - let - accDB = stateDBFromTag(blockId("latest")) - tx = unsignedTx(data, chainDB, accDB.getNonce(address) + 1, com.chainId) - eip155 = com.isEIP155(com.syncCurrent) - signedTx = signTransaction(tx, acc.privateKey, eip155) - result = rlp.encode(signedTx) - - server.rpc("eth_sendTransaction") do(data: TransactionArgs) -> Hash32: - ## Creates new message call transaction or a contract creation, if the data field contains code. - ## - ## obj: the transaction object. - ## Returns the transaction hash, or the zero hash if the transaction is not yet available. - ## Note: Use eth_getTransactionReceipt to get the contract address, after the transaction was mined, when you created a contract. - let - address = data.`from`.get() - acc = ctx.am.getAccount(address).tryGet() - - if not acc.unlocked: - raise newException(ValueError, "Account locked, please unlock it first") - - let - accDB = stateDBFromTag(blockId("latest")) - tx = unsignedTx(data, chainDB, accDB.getNonce(address) + 1, com.chainId) - eip155 = com.isEIP155(com.syncCurrent) - signedTx = signTransaction(tx, acc.privateKey, eip155) - networkPayload = - if signedTx.txType == TxEip4844: - if data.blobs.isNone or data.commitments.isNone or data.proofs.isNone: - raise newException(ValueError, "EIP-4844 transaction needs blobs") - if data.blobs.get.len != signedTx.versionedHashes.len: - raise newException(ValueError, "Incorrect number of blobs") - if data.commitments.get.len != signedTx.versionedHashes.len: - raise newException(ValueError, "Incorrect number of commitments") - if data.proofs.get.len != signedTx.versionedHashes.len: - raise newException(ValueError, "Incorrect number of proofs") - NetworkPayload( - blobs: data.blobs.get.mapIt it.NetworkBlob, - commitments: data.commitments.get, - proofs: data.proofs.get) - else: - if data.blobs.isSome or data.commitments.isSome or data.proofs.isSome: - raise newException(ValueError, "Blobs require EIP-4844 transaction") - nil - pooledTx = PooledTransaction(tx: signedTx, networkPayload: networkPayload) - - txPool.add(pooledTx) - rlpHash(signedTx) - - server.rpc("eth_sendRawTransaction") do(txBytes: seq[byte]) -> Hash32: - ## Creates new message call transaction or a contract creation for signed transactions. - ## - ## data: the signed transaction data. - ## Returns the transaction hash, or the zero hash if the transaction is not yet available. - ## Note: Use eth_getTransactionReceipt to get the contract address, after the transaction was mined, when you created a contract. - let - pooledTx = decodePooledTx(txBytes) - txHash = rlpHash(pooledTx) - - txPool.add(pooledTx) - let res = txPool.inPoolAndReason(txHash) - if res.isErr: - raise newException(ValueError, res.error) - txHash - - server.rpc("eth_call") do(args: TransactionArgs, quantityTag: BlockTag) -> seq[byte]: - ## Executes a new message call immediately without creating a transaction on the block chain. - ## - ## call: the transaction call object. - ## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter. - ## Returns the return value of executed contract. - let - header = headerFromTag(chainDB, quantityTag) - res = rpcCallEvm(args, header, com).valueOr: - raise newException(ValueError, "rpcCallEvm error: " & $error.code) - res.output - - server.rpc("eth_estimateGas") do(args: TransactionArgs) -> Web3Quantity: - ## Generates and returns an estimate of how much gas is necessary to allow the transaction to complete. - ## The transaction will not be added to the blockchain. Note that the estimate may be significantly more than - ## the amount of gas actually used by the transaction, for a variety of reasons including EVM mechanics and node performance. - ## - ## args: the transaction call object. - ## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter. - ## Returns the amount of gas used. - let - header = chainDB.headerFromTag(blockId("latest")) - # TODO: DEFAULT_RPC_GAS_CAP should configurable - gasUsed = rpcEstimateGas(args, header, com, DEFAULT_RPC_GAS_CAP).valueOr: - raise newException(ValueError, "rpcEstimateGas error: " & $error.code) - w3Qty(gasUsed) - - server.rpc("eth_getBlockByHash") do(data: Hash32, fullTransactions: bool) -> BlockObject: - ## Returns information about a block by hash. - ## - ## data: Hash of a block. - ## fullTransactions: If true it returns the full transaction objects, if false only the hashes of the transactions. - ## Returns BlockObject or nil when no block was found. - var - header:Header - hash = data - - if chainDB.getBlockHeader(hash, header): - populateBlockObject(header, chainDB, fullTransactions) - else: - nil - - server.rpc("eth_getBlockByNumber") do(quantityTag: BlockTag, fullTransactions: bool) -> BlockObject: - ## Returns information about a block by block number. - ## - ## quantityTag: integer of a block number, or the string "earliest", "latest" or "pending", as in the default block parameter. - ## fullTransactions: If true it returns the full transaction objects, if false only the hashes of the transactions. - ## Returns BlockObject or nil when no block was found. - try: - let header = chainDB.headerFromTag(quantityTag) - populateBlockObject(header, chainDB, fullTransactions) - except CatchableError: - nil - - server.rpc("eth_getTransactionByHash") do(data: Hash32) -> TransactionObject: - ## Returns the information about a transaction requested by transaction hash. - ## - ## data: hash of a transaction. - ## Returns requested transaction information. - let txHash = data - let res = txPool.getItem(txHash) - if res.isOk: - return populateTransactionObject(res.get().tx) - - let txDetails = chainDB.getTransactionKey(txHash) - if txDetails.index < 0: - return nil - - let header = chainDB.getBlockHeader(txDetails.blockNumber) - var tx: Transaction - if chainDB.getTransactionByIndex(header.txRoot, uint16(txDetails.index), tx): - result = populateTransactionObject(tx, Opt.some(header), Opt.some(txDetails.index)) - - server.rpc("eth_getTransactionByBlockHashAndIndex") do(data: Hash32, quantity: Web3Quantity) -> TransactionObject: - ## Returns information about a transaction by block hash and transaction index position. - ## - ## data: hash of a block. - ## quantity: integer of the transaction index position. - ## Returns requested transaction information. - let index = uint64(quantity) - var header:Header - if not chainDB.getBlockHeader(data, header): - return nil - - var tx: Transaction - if chainDB.getTransactionByIndex(header.txRoot, uint16(index), tx): - populateTransactionObject(tx, Opt.some(header), Opt.some(index)) - else: - nil - - server.rpc("eth_getTransactionByBlockNumberAndIndex") do(quantityTag: BlockTag, quantity: Web3Quantity) -> TransactionObject: - ## Returns information about a transaction by block number and transaction index position. - ## - ## quantityTag: a block number, or the string "earliest", "latest" or "pending", as in the default block parameter. - ## quantity: the transaction index position. - let - header = chainDB.headerFromTag(quantityTag) - index = uint64(quantity) - - var tx: Transaction - if chainDB.getTransactionByIndex(header.txRoot, uint16(index), tx): - populateTransactionObject(tx, Opt.some(header), Opt.some(index)) - else: - nil - - server.rpc("eth_getTransactionReceipt") do(data: Hash32) -> ReceiptObject: - ## Returns the receipt of a transaction by transaction hash. - ## - ## data: hash of a transaction. - ## Returns transaction receipt. - - let txDetails = chainDB.getTransactionKey(data) - if txDetails.index < 0: - return nil - - let header = chainDB.getBlockHeader(txDetails.blockNumber) - var tx: Transaction - if not chainDB.getTransactionByIndex(header.txRoot, uint16(txDetails.index), tx): - return nil - - var - idx = 0'u64 - prevGasUsed = GasInt(0) - - for receipt in chainDB.getReceipts(header.receiptsRoot): - let gasUsed = receipt.cumulativeGasUsed - prevGasUsed - prevGasUsed = receipt.cumulativeGasUsed - if idx == txDetails.index: - return populateReceipt(receipt, gasUsed, tx, txDetails.index, header) - idx.inc - - server.rpc("eth_getUncleByBlockHashAndIndex") do(data: Hash32, quantity: Web3Quantity) -> BlockObject: - ## Returns information about a uncle of a block by hash and uncle index position. - ## - ## data: hash of block. - ## quantity: the uncle's index position. - ## Returns BlockObject or nil when no block was found. - let index = uint64(quantity) - var header:Header - if not chainDB.getBlockHeader(data, header): - return nil - - let uncles = chainDB.getUncles(header.ommersHash) - if index < 0 or index >= uncles.len.uint64: - return nil - - result = populateBlockObject(uncles[index], chainDB, false, true) - result.totalDifficulty = chainDB.getScore(header.blockHash).valueOr(0.u256) - - server.rpc("eth_getUncleByBlockNumberAndIndex") do(quantityTag: BlockTag, quantity: Web3Quantity) -> BlockObject: - # Returns information about a uncle of a block by number and uncle index position. - ## - ## quantityTag: a block number, or the string "earliest", "latest" or "pending", as in the default block parameter. - ## quantity: the uncle's index position. - ## Returns BlockObject or nil when no block was found. - let - index = uint64(quantity) - header = chainDB.headerFromTag(quantityTag) - uncles = chainDB.getUncles(header.ommersHash) - - if index < 0 or index >= uncles.len.uint64: - return nil - - result = populateBlockObject(uncles[index], chainDB, false, true) - result.totalDifficulty = chainDB.getScore(header.blockHash).valueOr(0.u256) - - proc getLogsForBlock( - chain: CoreDbRef, - hash: Hash32, - header:Header, - opts: FilterOptions): seq[FilterLog] - {.gcsafe, raises: [RlpError,BlockNotFound].} = - if headerBloomFilter(header, opts.address, opts.topics): - let blockBody = chain.getBlockBody(hash) - let receipts = chain.getReceipts(header.receiptsRoot) - # Note: this will hit assertion error if number of block transactions - # do not match block receipts. - # Although this is fine as number of receipts should always match number - # of transactions - let logs = deriveLogs(header, blockBody.transactions, receipts) - let filteredLogs = filterLogs(logs, opts.address, opts.topics) - return filteredLogs - else: - return @[] - - proc getLogsForRange( - chain: CoreDbRef, - start: common.BlockNumber, - finish: common.BlockNumber, - opts: FilterOptions): seq[FilterLog] - {.gcsafe, raises: [RlpError,BlockNotFound].} = - var logs = newSeq[FilterLog]() - var i = start - while i <= finish: - let res = chain.getBlockHeaderWithHash(i) - if res.isSome(): - let (hash, header)= res.unsafeGet() - let filtered = chain.getLogsForBlock(header, hash, opts) - logs.add(filtered) - else: - # - return logs - i = i + 1 - return logs - - server.rpc("eth_getLogs") do(filterOptions: FilterOptions) -> seq[FilterLog]: - ## filterOptions: settings for this filter. - ## Returns a list of all logs matching a given filter object. - ## TODO: Current implementation is pretty naive and not efficient - ## as it requires to fetch all transactions and all receipts from database. - ## Other clients (Geth): - ## - Store logs related data in receipts. - ## - Have separate indexes for Logs in given block - ## Both of those changes require improvements to the way how we keep our data - ## in Nimbus. - if filterOptions.blockHash.isSome(): - let hash = filterOptions.blockHash.unsafeGet() - let header = chainDB.getBlockHeader(hash) - return getLogsForBlock(chainDB, hash, header, filterOptions) - else: - # TODO: do something smarter with tags. It would be the best if - # tag would be an enum (Earliest, Latest, Pending, Number), and all operations - # would operate on this enum instead of raw strings. This change would need - # to be done on every endpoint to be consistent. - let fromHeader = chainDB.headerFromTag(filterOptions.fromBlock) - let toHeader = chainDB.headerFromTag(filterOptions.toBlock) - - # Note: if fromHeader.number > toHeader.number, no logs will be - # returned. This is consistent with, what other ethereum clients return - let logs = chainDB.getLogsForRange( - fromHeader.number, - toHeader.number, - filterOptions - ) - return logs - - server.rpc("eth_getProof") do(data: eth_types.Address, slots: seq[UInt256], quantityTag: BlockTag) -> ProofResponse: - ## Returns information about an account and storage slots (if the account is a contract - ## and the slots are requested) along with account and storage proofs which prove the - ## existence of the values in the state. - ## See spec here: https://eips.ethereum.org/EIPS/eip-1186 - ## - ## data: address of the account. - ## slots: integers of the positions in the storage to return with storage proofs. - ## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter. - ## Returns: the proof response containing the account, account proof and storage proof - - let - accDB = stateDBFromTag(quantityTag) - address = data - - getProof(accDB, address, slots) - - server.rpc("eth_getBlockReceipts") do(quantityTag: BlockTag) -> Opt[seq[ReceiptObject]]: - ## Returns the receipts of a block. - try: - let header = chainDB.headerFromTag(quantityTag) - var - prevGasUsed = GasInt(0) - recs: seq[ReceiptObject] - txs: seq[Transaction] - index = 0'u64 - - for tx in chainDB.getBlockTransactions(header): - txs.add tx - - for receipt in chainDB.getReceipts(header.receiptsRoot): - let gasUsed = receipt.cumulativeGasUsed - prevGasUsed - prevGasUsed = receipt.cumulativeGasUsed - recs.add populateReceipt(receipt, gasUsed, txs[index], index, header) - inc index - - return Opt.some(recs) - except CatchableError: - return Opt.none(seq[ReceiptObject]) - - server.rpc("eth_createAccessList") do(args: TransactionArgs, quantityTag: BlockTag) -> AccessListResult: - ## Generates an access list for a transaction. - try: - let - header = chainDB.headerFromTag(quantityTag) - return createAccessList(header, com, args) - except CatchableError as exc: - return AccessListResult( - error: Opt.some("createAccessList error: " & exc.msg), - ) - - server.rpc("eth_blobBaseFee") do() -> Web3Quantity: - ## Returns the base fee per blob gas in wei. - let header = chainDB.headerFromTag(blockId("latest")) - if header.blobGasUsed.isNone: - raise newException(ValueError, "blobGasUsed missing from latest header") - if header.excessBlobGas.isNone: - raise newException(ValueError, "excessBlobGas missing from latest header") - let blobBaseFee = getBlobBaseFee(header.excessBlobGas.get) * header.blobGasUsed.get.u256 - if blobBaseFee > high(uint64).u256: - raise newException(ValueError, "blobBaseFee is bigger than uint64.max") - return w3Qty blobBaseFee.truncate(uint64) - - server.rpc("eth_feeHistory") do(blockCount: Quantity, - newestBlock: BlockTag, - rewardPercentiles: Opt[seq[float64]]) -> FeeHistoryResult: - let - blocks = blockCount.uint64 - percentiles = rewardPercentiles.get(newSeq[float64]()) - res = feeHistory(oracle, blocks, newestBlock, percentiles) - if res.isErr: - raise newException(ValueError, res.error) - return res.get diff --git a/nimbus/rpc/rpc_utils.nim b/nimbus/rpc/rpc_utils.nim index d57c88f033..54bf90f673 100644 --- a/nimbus/rpc/rpc_utils.nim +++ b/nimbus/rpc/rpc_utils.nim @@ -10,7 +10,7 @@ {.push raises: [].} import - std/[strutils, algorithm], + std/[sequtils, algorithm], ./rpc_types, ./params, ../db/core_db, @@ -29,41 +29,11 @@ import ../common/common, web3/eth_api_types -const - defaultTag = blockId("latest") - -proc headerFromTag*(chain: CoreDbRef, blockId: BlockTag): Header - {.gcsafe, raises: [CatchableError].} = - - if blockId.kind == bidAlias: - let tag = blockId.alias.toLowerAscii - case tag - of "latest": result = chain.getCanonicalHead() - of "earliest": result = chain.getBlockHeader(GENESIS_BLOCK_NUMBER) - of "safe": result = chain.safeHeader() - of "finalized": result = chain.finalizedHeader() - of "pending": - #TODO: Implement get pending block - # We currently fall back to `latest` so that the `tx-spammer` in - # `ethpandaops/ethereum-package` can make progress. A real - # implementation is still required that takes into account any - # pending transactions that have not yet been bundled into a block. - result = chain.getCanonicalHead() - else: - raise newException(ValueError, "Unsupported block tag " & tag) - else: - let blockNum = blockId.number.uint64 - result = chain.getBlockHeader(blockNum) - -proc headerFromTag*(chain: CoreDbRef, blockTag: Opt[BlockTag]): Header - {.gcsafe, raises: [CatchableError].} = - let blockId = blockTag.get(defaultTag) - chain.headerFromTag(blockId) - -proc calculateMedianGasPrice*(chain: CoreDbRef): GasInt - {.gcsafe, raises: [CatchableError].} = +proc calculateMedianGasPrice*(chain: CoreDbRef): GasInt {.raises: [RlpError].} = + const minGasPrice = 30_000_000_000.GasInt var prices = newSeqOfCap[GasInt](64) - let header = chain.getCanonicalHead() + let header = chain.getCanonicalHead().valueOr: + return minGasPrice for encodedTx in chain.getBlockTransactionData(header.txRoot): let tx = decodeTx(encodedTx) prices.add(tx.gasPrice) @@ -85,40 +55,44 @@ proc calculateMedianGasPrice*(chain: CoreDbRef): GasInt # sane minimum for compatibility to unblock testing. # Note: When this is fixed, update `tests/graphql/queries.toml` and # re-enable the "query.gasPrice" test case (remove `skip = true`). - const minGasPrice = 30_000_000_000.GasInt result = max(result, minGasPrice) proc unsignedTx*(tx: TransactionArgs, chain: CoreDbRef, defaultNonce: AccountNonce, chainId: ChainId): Transaction {.gcsafe, raises: [CatchableError].} = + + var res: Transaction + if tx.to.isSome: - result.to = Opt.some(tx.to.get) + res.to = Opt.some(tx.to.get) if tx.gas.isSome: - result.gasLimit = tx.gas.get.GasInt + res.gasLimit = tx.gas.get.GasInt else: - result.gasLimit = 90000.GasInt + res.gasLimit = 90000.GasInt if tx.gasPrice.isSome: - result.gasPrice = tx.gasPrice.get.GasInt + res.gasPrice = tx.gasPrice.get.GasInt else: - result.gasPrice = calculateMedianGasPrice(chain) + res.gasPrice = calculateMedianGasPrice(chain) if tx.value.isSome: - result.value = tx.value.get + res.value = tx.value.get else: - result.value = 0.u256 + res.value = 0.u256 if tx.nonce.isSome: - result.nonce = tx.nonce.get.AccountNonce + res.nonce = tx.nonce.get.AccountNonce else: - result.nonce = defaultNonce + res.nonce = defaultNonce + + res.payload = tx.payload + res.chainId = chainId - result.payload = tx.payload - result.chainId = chainId + return res proc toWd(wd: Withdrawal): WithdrawalObject = WithdrawalObject( - index: Quantity wd.index, + index: Quantity(wd.index), validatorIndex: Quantity wd.validatorIndex, address: wd.address, amount: Quantity wd.amount, @@ -129,15 +103,34 @@ proc toWdList(list: openArray[Withdrawal]): seq[WithdrawalObject] = for x in list: result.add toWd(x) +func toWdList(x: Opt[seq[Withdrawal]]): + Opt[seq[WithdrawalObject]] = + if x.isNone: Opt.none(seq[WithdrawalObject]) + else: Opt.some(toWdList x.get) + +func toAuth*(x: Authorization): AuthorizationObject = + AuthorizationObject( + chainId: Quantity(x.chainId), + address: x.address, + nonce: Quantity(x.nonce), + v: Quantity(x.v), + r: x.r, + s: x.s, + ) + +proc toAuthList(list: openArray[Authorization]): seq[AuthorizationObject] = + result = newSeqOfCap[AuthorizationObject](list.len) + for x in list: + result.add toAuth(x) + proc populateTransactionObject*(tx: Transaction, - optionalHeader: Opt[Header] = Opt.none(Header), + optionalHash: Opt[eth_types.Hash32] = Opt.none(eth_types.Hash32), + optionalNumber: Opt[eth_types.BlockNumber] = Opt.none(eth_types.BlockNumber), txIndex: Opt[uint64] = Opt.none(uint64)): TransactionObject = result = TransactionObject() result.`type` = Opt.some Quantity(tx.txType) - if optionalHeader.isSome: - let header = optionalHeader.get - result.blockHash = Opt.some(header.blockHash) - result.blockNumber = Opt.some(Quantity(header.number)) + result.blockHash = optionalHash + result.blockNumber = w3Qty(optionalNumber) if (let sender = tx.recoverSender(); sender.isOk): result.`from` = sender[] @@ -160,21 +153,27 @@ proc populateTransactionObject*(tx: Transaction, result.chainId = Opt.some(Quantity(tx.chainId)) result.accessList = Opt.some(tx.accessList) - if tx.txType >= TxEIP4844: + if tx.txType >= TxEip4844: result.maxFeePerBlobGas = Opt.some(tx.maxFeePerBlobGas) result.blobVersionedHashes = Opt.some(tx.versionedHashes) -proc populateBlockObject*(header: Header, chain: CoreDbRef, fullTx: bool, isUncle = false): BlockObject - {.gcsafe, raises: [RlpError].} = - let blockHash = header.blockHash - result = BlockObject() + if tx.txType >= TxEip7702: + result.authorizationList = Opt.some(toAuthList(tx.authorizationList)) + +proc populateBlockObject*(blockHash: eth_types.Hash32, + blk: Block, + totalDifficulty: UInt256, + fullTx: bool, + withUncles: bool = false): BlockObject = + template header: auto = blk.header + result = BlockObject() result.number = Quantity(header.number) result.hash = blockHash result.parentHash = header.parentHash - result.nonce = Opt.some(FixedBytes[8] header.nonce) + result.nonce = Opt.some(header.nonce) result.sha3Uncles = header.ommersHash - result.logsBloom = FixedBytes[256] header.logsBloom + result.logsBloom = header.logsBloom result.transactionsRoot = header.txRoot result.stateRoot = header.stateRoot result.receiptsRoot = header.receiptsRoot @@ -184,57 +183,53 @@ proc populateBlockObject*(header: Header, chain: CoreDbRef, fullTx: bool, isUncl result.mixHash = Hash32 header.mixHash # discard sizeof(seq[byte]) of extraData and use actual length - let size = sizeof(Header) - sizeof(seq[byte]) + header.extraData.len + let size = sizeof(eth_types.Header) - sizeof(eth_api_types.Blob) + header.extraData.len result.size = Quantity(size) result.gasLimit = Quantity(header.gasLimit) result.gasUsed = Quantity(header.gasUsed) result.timestamp = Quantity(header.timestamp) result.baseFeePerGas = header.baseFeePerGas + result.totalDifficulty = totalDifficulty - if not isUncle: - result.totalDifficulty = chain.getScore(blockHash).valueOr(0.u256) - result.uncles = chain.getUncleHashes(header) + if not withUncles: + result.uncles = blk.uncles.mapIt(it.blockHash) - if fullTx: - var i = 0'u64 - for tx in chain.getBlockTransactions(header): - result.transactions.add txOrHash(populateTransactionObject(tx, Opt.some(header), Opt.some(i))) - inc i - else: - for x in chain.getBlockTransactionHashes(header): - result.transactions.add txOrHash(x) - - if header.withdrawalsRoot.isSome: - result.withdrawalsRoot = Opt.some(header.withdrawalsRoot.get) - result.withdrawals = Opt.some(toWdList(chain.getWithdrawals(header.withdrawalsRoot.get))) - - if header.blobGasUsed.isSome: - result.blobGasUsed = Opt.some(Quantity(header.blobGasUsed.get)) - - if header.excessBlobGas.isSome: - result.excessBlobGas = Opt.some(Quantity(header.excessBlobGas.get)) + if fullTx: + for i, tx in blk.transactions: + let txObj = populateTransactionObject(tx, + Opt.some(blockHash), + Opt.some(header.number), Opt.some(i.uint64)) + result.transactions.add txOrHash(txObj) + else: + for i, tx in blk.transactions: + let txHash = rlpHash(tx) + result.transactions.add txOrHash(txHash) - if header.parentBeaconBlockRoot.isSome: - result.parentBeaconBlockRoot = Opt.some(header.parentBeaconBlockRoot.get) + result.withdrawalsRoot = header.withdrawalsRoot + result.withdrawals = toWdList blk.withdrawals + result.parentBeaconBlockRoot = header.parentBeaconBlockRoot + result.blobGasUsed = w3Qty(header.blobGasUsed) + result.excessBlobGas = w3Qty(header.excessBlobGas) + result.requestsHash = header.requestsHash proc populateReceipt*(receipt: Receipt, gasUsed: GasInt, tx: Transaction, txIndex: uint64, header: Header): ReceiptObject = let sender = tx.recoverSender() - result = ReceiptObject() - result.transactionHash = tx.rlpHash - result.transactionIndex = Quantity(txIndex) - result.blockHash = header.blockHash - result.blockNumber = Quantity(header.number) + var res = ReceiptObject() + res.transactionHash = tx.rlpHash + res.transactionIndex = Quantity(txIndex) + res.blockHash = header.blockHash + res.blockNumber = Quantity(header.number) if sender.isSome(): - result.`from` = sender.get() - result.to = Opt.some(tx.destination) - result.cumulativeGasUsed = Quantity(receipt.cumulativeGasUsed) - result.gasUsed = Quantity(gasUsed) - result.`type` = Opt.some Quantity(receipt.receiptType) + res.`from` = sender.get() + res.to = Opt.some(tx.destination) + res.cumulativeGasUsed = Quantity(receipt.cumulativeGasUsed) + res.gasUsed = Quantity(gasUsed) + res.`type` = Opt.some Quantity(receipt.receiptType) if tx.contractCreation and sender.isSome: - result.contractAddress = Opt.some(tx.creationAddress(sender[])) + res.contractAddress = Opt.some(tx.creationAddress(sender[])) for log in receipt.logs: # TODO: Work everywhere with either `Hash32` as topic or `array[32, byte]` @@ -246,36 +241,38 @@ proc populateReceipt*(receipt: Receipt, gasUsed: GasInt, tx: Transaction, removed: false, # TODO: Not sure what is difference between logIndex and TxIndex and how # to calculate it. - logIndex: Opt.some(result.transactionIndex), + logIndex: Opt.some(res.transactionIndex), # Note: the next 4 fields cause a lot of duplication of data, but the spec # is what it is. Not sure if other clients actually add this. - transactionIndex: Opt.some(result.transactionIndex), - transactionHash: Opt.some(result.transactionHash), - blockHash: Opt.some(result.blockHash), - blockNumber: Opt.some(result.blockNumber), + transactionIndex: Opt.some(res.transactionIndex), + transactionHash: Opt.some(res.transactionHash), + blockHash: Opt.some(res.blockHash), + blockNumber: Opt.some(res.blockNumber), # The actual fields address: log.address, data: log.data, topics: topics ) - result.logs.add(logObject) + res.logs.add(logObject) - result.logsBloom = FixedBytes[256] receipt.logsBloom + res.logsBloom = FixedBytes[256] receipt.logsBloom # post-transaction stateroot (pre Byzantium). if receipt.hasStateRoot: - result.root = Opt.some(receipt.stateRoot) + res.root = Opt.some(receipt.stateRoot) else: # 1 = success, 0 = failure. - result.status = Opt.some(Quantity(receipt.status.uint64)) + res.status = Opt.some(Quantity(receipt.status.uint64)) let baseFeePerGas = header.baseFeePerGas.get(0.u256) let normTx = eip1559TxNormalization(tx, baseFeePerGas.truncate(GasInt)) - result.effectiveGasPrice = Quantity(normTx.gasPrice) + res.effectiveGasPrice = Quantity(normTx.gasPrice) if tx.txType == TxEip4844: - result.blobGasUsed = Opt.some(Quantity(tx.versionedHashes.len.uint64 * GAS_PER_BLOB.uint64)) - result.blobGasPrice = Opt.some(getBlobBaseFee(header.excessBlobGas.get(0'u64))) + res.blobGasUsed = Opt.some(Quantity(tx.versionedHashes.len.uint64 * GAS_PER_BLOB.uint64)) + res.blobGasPrice = Opt.some(getBlobBaseFee(header.excessBlobGas.get(0'u64))) + + return res proc createAccessList*(header: Header, com: CommonRef, @@ -335,4 +332,4 @@ proc createAccessList*(header: Header, gasUsed: Quantity res.gasUsed, ) - prevTracer = tracer + prevTracer = tracer \ No newline at end of file diff --git a/nimbus/rpc/server_api.nim b/nimbus/rpc/server_api.nim index 7b2310c562..90c1cd9ca2 100644 --- a/nimbus/rpc/server_api.nim +++ b/nimbus/rpc/server_api.nim @@ -10,9 +10,12 @@ {.push raises: [].} import + chronicles, + std/[sequtils, strutils], stint, web3/[conversions, eth_api_types], eth/common/base, + stew/byteutils, ../common/common, json_rpc/rpcserver, ../db/ledger, @@ -22,32 +25,68 @@ import ../transaction, ../transaction/call_evm, ../evm/evm_errors, + ../core/eip4844, ./rpc_types, ./rpc_utils, - ./filters, - ./server_api_helpers + ./filters -type - ServerAPIRef* = ref object - com: CommonRef - chain: ForkedChainRef - txPool: TxPoolRef +type ServerAPIRef* = ref object + com: CommonRef + chain: ForkedChainRef + txPool: TxPoolRef -const - defaultTag = blockId("latest") +const defaultTag = blockId("latest") func newServerAPI*(c: ForkedChainRef, t: TxPoolRef): ServerAPIRef = - ServerAPIRef( - com: c.com, - chain: c, - txPool: t - ) + ServerAPIRef(com: c.com, chain: c, txPool: t) + +proc getTotalDifficulty*(api: ServerAPIRef, blockHash: Hash32): UInt256 = + let totalDifficulty = api.com.db.getScore(blockHash).valueOr: + return api.com.db.headTotalDifficulty() + return totalDifficulty + +proc getProof*( + accDB: LedgerRef, address: eth_types.Address, slots: seq[UInt256] +): ProofResponse = + let + acc = accDB.getEthAccount(address) + accExists = accDB.accountExists(address) + accountProof = accDB.getAccountProof(address) + slotProofs = accDB.getStorageProof(address, slots) + + var storage = newSeqOfCap[StorageProof](slots.len) + + for i, slotKey in slots: + let slotValue = accDB.getStorage(address, slotKey) + storage.add( + StorageProof( + key: slotKey, value: slotValue, proof: seq[RlpEncodedBytes](slotProofs[i]) + ) + ) + + if accExists: + ProofResponse( + address: address, + accountProof: seq[RlpEncodedBytes](accountProof), + balance: acc.balance, + nonce: w3Qty(acc.nonce), + codeHash: acc.codeHash, + storageHash: acc.storageRoot, + storageProof: storage, + ) + else: + ProofResponse( + address: address, + accountProof: seq[RlpEncodedBytes](accountProof), + storageProof: storage, + ) proc headerFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[Header, string] = if blockTag.kind == bidAlias: let tag = blockTag.alias.toLowerAscii case tag - of "latest": return ok(api.chain.latestHeader) + of "latest": + return ok(api.chain.latestHeader) else: return err("Unsupported block tag " & tag) else: @@ -60,11 +99,10 @@ proc headerFromTag(api: ServerAPIRef, blockTag: Opt[BlockTag]): Result[Header, s proc ledgerFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[LedgerRef, string] = let header = ?api.headerFromTag(blockTag) - if api.chain.stateReady(header): - ok(LedgerRef.init(api.com.db)) - else: - # TODO: Replay state? - err("Block state not ready") + if not api.chain.stateReady(header): + api.chain.replaySegment(header.blockHash) + + ok(LedgerRef.init(api.com.db)) proc blockFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[Block, string] = if blockTag.kind == bidAlias: @@ -78,31 +116,35 @@ proc blockFromTag(api: ServerAPIRef, blockTag: BlockTag): Result[Block, string] let blockNum = base.BlockNumber blockTag.number return api.chain.blockByNumber(blockNum) -proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = +proc setupServerAPI*(api: ServerAPIRef, server: RpcServer, ctx: EthContext) = server.rpc("eth_getBalance") do(data: Address, blockTag: BlockTag) -> UInt256: ## Returns the balance of the account of given address. let - ledger = api.ledgerFromTag(blockTag).valueOr: + ledger = api.ledgerFromTag(blockTag).valueOr: raise newException(ValueError, error) address = data ledger.getBalance(address) - server.rpc("eth_getStorageAt") do(data: Address, slot: UInt256, blockTag: BlockTag) -> FixedBytes[32]: + server.rpc("eth_getStorageAt") do( + data: Address, slot: UInt256, blockTag: BlockTag + ) -> FixedBytes[32]: ## Returns the value from a storage position at a given address. let - ledger = api.ledgerFromTag(blockTag).valueOr: + ledger = api.ledgerFromTag(blockTag).valueOr: raise newException(ValueError, error) address = data - value = ledger.getStorage(address, slot) + value = ledger.getStorage(address, slot) value.to(Bytes32) - server.rpc("eth_getTransactionCount") do(data: Address, blockTag: BlockTag) -> Web3Quantity: + server.rpc("eth_getTransactionCount") do( + data: Address, blockTag: BlockTag + ) -> Web3Quantity: ## Returns the number of transactions ak.s. nonce sent from an address. let - ledger = api.ledgerFromTag(blockTag).valueOr: + ledger = api.ledgerFromTag(blockTag).valueOr: raise newException(ValueError, error) address = data - nonce = ledger.getNonce(address) + nonce = ledger.getNonce(address) Quantity(nonce) server.rpc("eth_blockNumber") do() -> Web3Quantity: @@ -119,12 +161,14 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = ## blockTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter. ## Returns the code from the given address. let - ledger = api.ledgerFromTag(blockTag).valueOr: + ledger = api.ledgerFromTag(blockTag).valueOr: raise newException(ValueError, error) address = data ledger.getCode(address).bytes() - server.rpc("eth_getBlockByHash") do(data: Hash32, fullTransactions: bool) -> BlockObject: + server.rpc("eth_getBlockByHash") do( + data: Hash32, fullTransactions: bool + ) -> BlockObject: ## Returns information about a block by hash. ## ## data: Hash of a block. @@ -135,9 +179,13 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = let blk = api.chain.blockByHash(blockHash).valueOr: return nil - return populateBlockObject(blockHash, blk, fullTransactions) + return populateBlockObject( + blockHash, blk, api.getTotalDifficulty(blockHash), fullTransactions + ) - server.rpc("eth_getBlockByNumber") do(blockTag: BlockTag, fullTransactions: bool) -> BlockObject: + server.rpc("eth_getBlockByNumber") do( + blockTag: BlockTag, fullTransactions: bool + ) -> BlockObject: ## Returns information about a block by block number. ## ## blockTag: integer of a block number, or the string "earliest", "latest" or "pending", as in the default block parameter. @@ -147,29 +195,36 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = return nil let blockHash = blk.header.blockHash - return populateBlockObject(blockHash, blk, fullTransactions) + return populateBlockObject( + blockHash, blk, api.getTotalDifficulty(blockHash), fullTransactions + ) server.rpc("eth_syncing") do() -> SyncingStatus: ## Returns SyncObject or false when not syncing. if api.com.syncState != Waiting: let sync = SyncObject( startingBlock: Quantity(api.com.syncStart), - currentBlock : Quantity(api.com.syncCurrent), - highestBlock : Quantity(api.com.syncHighest) + currentBlock: Quantity(api.com.syncCurrent), + highestBlock: Quantity(api.com.syncHighest), ) return SyncingStatus(syncing: true, syncObject: sync) else: return SyncingStatus(syncing: false) proc getLogsForBlock( - chain: ForkedChainRef, - header: Header, - opts: FilterOptions): seq[FilterLog] - {.gcsafe, raises: [RlpError].} = + chain: ForkedChainRef, header: Header, opts: FilterOptions + ): seq[FilterLog] {.gcsafe, raises: [].} = if headerBloomFilter(header, opts.address, opts.topics): - let - receipts = chain.db.getReceipts(header.receiptsRoot) - txs = chain.db.getTransactions(header.txRoot) + let (receipts, txs) = + if api.chain.isInMemory(header.blockHash): + let blk = api.chain.memoryBlock(header.blockHash) + (blk.receipts, blk.blk.transactions) + else: + let rcs = chain.db.getReceipts(header.receiptsRoot).valueOr: + return @[] + let txs = chain.db.getTransactions(header.txRoot).valueOr: + return @[] + (rcs, txs) # Note: this will hit assertion error if number of block transactions # do not match block receipts. # Although this is fine as number of receipts should always match number @@ -184,8 +239,8 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = chain: ForkedChainRef, start: base.BlockNumber, finish: base.BlockNumber, - opts: FilterOptions): seq[FilterLog] - {.gcsafe, raises: [RlpError].} = + opts: FilterOptions, + ): seq[FilterLog] {.gcsafe, raises: [].} = var logs = newSeq[FilterLog]() blockNum = start @@ -193,7 +248,7 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = while blockNum <= finish: let header = chain.headerByNumber(blockNum).valueOr: - return logs + return logs filtered = chain.getLogsForBlock(header, opts) logs.add(filtered) blockNum = blockNum + 1 @@ -228,11 +283,7 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = # Note: if fromHeader.number > toHeader.number, no logs will be # returned. This is consistent with, what other ethereum clients return - return api.chain.getLogsForRange( - blockFrom.number, - blockTo.number, - filterOptions - ) + return api.chain.getLogsForRange(blockFrom.number, blockTo.number, filterOptions) server.rpc("eth_sendRawTransaction") do(txBytes: seq[byte]) -> Hash32: ## Creates new message call transaction or a contract creation for signed transactions. @@ -242,7 +293,7 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = ## Note: Use eth_getTransactionReceipt to get the contract address, after the transaction was mined, when you created a contract. let pooledTx = decodePooledTx(txBytes) - txHash = rlpHash(pooledTx) + txHash = rlpHash(pooledTx) api.txPool.add(pooledTx) let res = api.txPool.inPoolAndReason(txHash) @@ -258,9 +309,9 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = ## Returns the return value of executed contract. let header = api.headerFromTag(blockTag).valueOr: - raise newException(ValueError, "Block not found") - res = rpcCallEvm(args, header, api.com).valueOr: - raise newException(ValueError, "rpcCallEvm error: " & $error.code) + raise newException(ValueError, "Block not found") + res = rpcCallEvm(args, header, api.com).valueOr: + raise newException(ValueError, "rpcCallEvm error: " & $error.code) res.output server.rpc("eth_getTransactionReceipt") do(data: Hash32) -> ReceiptObject: @@ -278,17 +329,19 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = if blockhash == zeroHash32: # Receipt in database - let txDetails = api.chain.db.getTransactionKey(txHash) + let txDetails = api.chain.db.getTransactionKey(data).valueOr: + raise newException(ValueError, "TransactionKey not found") if txDetails.index < 0: return nil let header = api.chain.headerByNumber(txDetails.blockNumber).valueOr: raise newException(ValueError, "Block not found") - var tx: Transaction - if not api.chain.db.getTransactionByIndex(header.txRoot, uint16(txDetails.index), tx): + let tx = api.chain.db.getTransactionByIndex( + header.txRoot, uint16(txDetails.index)).valueOr: return nil - - for receipt in api.chain.db.getReceipts(header.receiptsRoot): + let receipts = api.chain.db.getReceipts(header.receiptsRoot).valueOr: + return nil + for receipt in receipts: let gasUsed = receipt.cumulativeGasUsed - prevGasUsed prevGasUsed = receipt.cumulativeGasUsed if idx == txDetails.index: @@ -304,7 +357,9 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = prevGasUsed = receipt.cumulativeGasUsed if txid == idx: - return populateReceipt(receipt, gasUsed, blkdesc.blk.transactions[txid], txid, blkdesc.blk.header) + return populateReceipt( + receipt, gasUsed, blkdesc.blk.transactions[txid], txid, blkdesc.blk.header + ) idx.inc @@ -317,8 +372,341 @@ proc setupServerAPI*(api: ServerAPIRef, server: RpcServer) = ## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter. ## Returns the amount of gas used. let - header = api.headerFromTag(blockId("latest")).valueOr: + header = api.headerFromTag(blockId("latest")).valueOr: raise newException(ValueError, "Block not found") - gasUsed = rpcEstimateGas(args, header, api.chain.com, DEFAULT_RPC_GAS_CAP).valueOr: + #TODO: change 0 to configureable gas cap + gasUsed = rpcEstimateGas(args, header, api.chain.com, DEFAULT_RPC_GAS_CAP).valueOr: raise newException(ValueError, "rpcEstimateGas error: " & $error.code) Quantity(gasUsed) + + server.rpc("eth_gasPrice") do() -> Web3Quantity: + ## Returns an integer of the current gas price in wei. + w3Qty(calculateMedianGasPrice(api.com.db).uint64) + + server.rpc("eth_accounts") do() -> seq[eth_types.Address]: + ## Returns a list of addresses owned by client. + result = newSeqOfCap[eth_types.Address](ctx.am.numAccounts) + for k in ctx.am.addresses: + result.add k + + server.rpc("eth_getBlockTransactionCountByHash") do(data: Hash32) -> Web3Quantity: + ## Returns the number of transactions in a block from a block matching the given block hash. + ## + ## data: hash of a block + ## Returns integer of the number of transactions in this block. + let blk = api.chain.blockByHash(data).valueOr: + raise newException(ValueError, "Block not found") + + Web3Quantity(blk.transactions.len) + + server.rpc("eth_getBlockTransactionCountByNumber") do( + blockTag: BlockTag + ) -> Web3Quantity: + ## Returns the number of transactions in a block from a block matching the given block number. + ## + ## blockTag: integer of a block number, or the string "latest", "earliest" or "pending", see the default block parameter. + ## Returns integer of the number of transactions in this block. + let blk = api.blockFromTag(blockTag).valueOr: + raise newException(ValueError, "Block not found") + + Web3Quantity(blk.transactions.len) + + server.rpc("eth_getUncleCountByBlockHash") do(data: Hash32) -> Web3Quantity: + ## Returns the number of uncles in a block from a block matching the given block hash. + ## + ## data: hash of a block. + ## Returns integer of the number of uncles in this block. + let blk = api.chain.blockByHash(data).valueOr: + raise newException(ValueError, "Block not found") + + Web3Quantity(blk.uncles.len) + + server.rpc("eth_getUncleCountByBlockNumber") do(blockTag: BlockTag) -> Web3Quantity: + ## Returns the number of uncles in a block from a block matching the given block number. + ## + ## blockTag: integer of a block number, or the string "latest", see the default block parameter. + ## Returns integer of the number of uncles in this block. + let blk = api.blockFromTag(blockTag).valueOr: + raise newException(ValueError, "Block not found") + + Web3Quantity(blk.uncles.len) + + template sign(privateKey: PrivateKey, message: string): seq[byte] = + # message length encoded as ASCII representation of decimal + let msgData = "\x19Ethereum Signed Message:\n" & $message.len & message + @(sign(privateKey, msgData.toBytes()).toRaw()) + + server.rpc("eth_sign") do(data: eth_types.Address, message: seq[byte]) -> seq[byte]: + ## The sign method calculates an Ethereum specific signature with: sign(keccak256("\x19Ethereum Signed Message:\n" + len(message) + message))). + ## By adding a prefix to the message makes the calculated signature recognisable as an Ethereum specific signature. + ## This prevents misuse where a malicious DApp can sign arbitrary data (e.g. transaction) and use the signature to impersonate the victim. + ## Note the address to sign with must be unlocked. + ## + ## data: address. + ## message: message to sign. + ## Returns signature. + let + address = data + acc = ctx.am.getAccount(address).tryGet() + + if not acc.unlocked: + raise newException(ValueError, "Account locked, please unlock it first") + sign(acc.privateKey, cast[string](message)) + + server.rpc("eth_signTransaction") do(data: TransactionArgs) -> seq[byte]: + ## Signs a transaction that can be submitted to the network at a later time using with + ## eth_sendRawTransaction + let + address = data.`from`.get() + acc = ctx.am.getAccount(address).tryGet() + + if not acc.unlocked: + raise newException(ValueError, "Account locked, please unlock it first") + + let + accDB = api.ledgerFromTag(blockId("latest")).valueOr: + raise newException(ValueError, "Latest Block not found") + tx = unsignedTx(data, api.chain.db, accDB.getNonce(address) + 1, api.com.chainId) + eip155 = api.com.isEIP155(api.chain.latestNumber) + signedTx = signTransaction(tx, acc.privateKey, eip155) + return rlp.encode(signedTx) + + server.rpc("eth_sendTransaction") do(data: TransactionArgs) -> Hash32: + ## Creates new message call transaction or a contract creation, if the data field contains code. + ## + ## obj: the transaction object. + ## Returns the transaction hash, or the zero hash if the transaction is not yet available. + ## Note: Use eth_getTransactionReceipt to get the contract address, after the transaction was mined, when you created a contract. + let + address = data.`from`.get() + acc = ctx.am.getAccount(address).tryGet() + + if not acc.unlocked: + raise newException(ValueError, "Account locked, please unlock it first") + + let + accDB = api.ledgerFromTag(blockId("latest")).valueOr: + raise newException(ValueError, "Latest Block not found") + tx = unsignedTx(data, api.chain.db, accDB.getNonce(address) + 1, api.com.chainId) + eip155 = api.com.isEIP155(api.chain.latestNumber) + signedTx = signTransaction(tx, acc.privateKey, eip155) + networkPayload = + if signedTx.txType == TxEip4844: + if data.blobs.isNone or data.commitments.isNone or data.proofs.isNone: + raise newException(ValueError, "EIP-4844 transaction needs blobs") + if data.blobs.get.len != signedTx.versionedHashes.len: + raise newException(ValueError, "Incorrect number of blobs") + if data.commitments.get.len != signedTx.versionedHashes.len: + raise newException(ValueError, "Incorrect number of commitments") + if data.proofs.get.len != signedTx.versionedHashes.len: + raise newException(ValueError, "Incorrect number of proofs") + NetworkPayload( + blobs: data.blobs.get.mapIt it.NetworkBlob, + commitments: data.commitments.get, + proofs: data.proofs.get, + ) + else: + if data.blobs.isSome or data.commitments.isSome or data.proofs.isSome: + raise newException(ValueError, "Blobs require EIP-4844 transaction") + nil + pooledTx = PooledTransaction(tx: signedTx, networkPayload: networkPayload) + + api.txPool.add(pooledTx) + rlpHash(signedTx) + + server.rpc("eth_getTransactionByHash") do(data: Hash32) -> TransactionObject: + ## Returns the information about a transaction requested by transaction hash. + ## + ## data: hash of a transaction. + ## Returns requested transaction information. + let txHash = data + let res = api.txPool.getItem(txHash) + if res.isOk: + return populateTransactionObject(res.get().tx, Opt.none(Hash32), Opt.none(uint64)) + + let txDetails = api.chain.db.getTransactionKey(txHash).valueOr: + return nil + if txDetails.index < 0: + let + (blockHash, txid) = api.chain.txRecords(txHash) + tx = api.chain.memoryTransaction(txHash).valueOr: + return nil + return populateTransactionObject(tx, Opt.some(blockHash), Opt.some(txid)) + # TODO: include block number + + let header = api.chain.db.getBlockHeader(txDetails.blockNumber).valueOr: + return nil + let tx = api.chain.db.getTransactionByIndex(header.txRoot, uint16(txDetails.index)).valueOr: + return nil + return populateTransactionObject( + tx, + Opt.some(header.blockHash), + Opt.some(header.number), + Opt.some(txDetails.index.uint64), + ) + + server.rpc("eth_getTransactionByBlockHashAndIndex") do( + data: Hash32, quantity: Web3Quantity + ) -> TransactionObject: + ## Returns information about a transaction by block hash and transaction index position. + ## + ## data: hash of a block. + ## quantity: integer of the transaction index position. + ## Returns requested transaction information. + let index = uint64(quantity) + let blk = api.chain.blockByHash(data).valueOr: + return nil + + if index >= uint64(blk.transactions.len): + return nil + + populateTransactionObject( + blk.transactions[index], Opt.some(data), Opt.some(blk.header.number), Opt.some(index) + ) + + server.rpc("eth_getTransactionByBlockNumberAndIndex") do( + quantityTag: BlockTag, quantity: Web3Quantity + ) -> TransactionObject: + ## Returns information about a transaction by block number and transaction index position. + ## + ## quantityTag: a block number, or the string "earliest", "latest" or "pending", as in the default block parameter. + ## quantity: the transaction index position. + ## NOTE : "pending" blockTag is not supported. + let index = uint64(quantity) + let blk = api.blockFromTag(quantityTag).valueOr: + return nil + + if index >= uint64(blk.transactions.len): + return nil + + populateTransactionObject( + blk.transactions[index], Opt.some(blk.header.blockHash), Opt.some(blk.header.number), Opt.some(index) + ) + + server.rpc("eth_getProof") do( + data: eth_types.Address, slots: seq[UInt256], quantityTag: BlockTag + ) -> ProofResponse: + ## Returns information about an account and storage slots (if the account is a contract + ## and the slots are requested) along with account and storage proofs which prove the + ## existence of the values in the state. + ## See spec here: https://eips.ethereum.org/EIPS/eip-1186 + ## + ## data: address of the account. + ## slots: integers of the positions in the storage to return with storage proofs. + ## quantityTag: integer block number, or the string "latest", "earliest" or "pending", see the default block parameter. + ## Returns: the proof response containing the account, account proof and storage proof + let accDB = api.ledgerFromTag(quantityTag).valueOr: + raise newException(ValueError, "Block not found") + + getProof(accDB, data, slots) + + server.rpc("eth_getBlockReceipts") do( + quantityTag: BlockTag + ) -> Opt[seq[ReceiptObject]]: + ## Returns the receipts of a block. + let + header = api.headerFromTag(quantityTag).valueOr: + raise newException(ValueError, "Block not found") + blkHash = header.blockHash + + var + prevGasUsed = GasInt(0) + receipts: seq[Receipt] + recs: seq[ReceiptObject] + txs: seq[Transaction] + index = 0'u64 + + if api.chain.haveBlockAndState(blkHash): + let blkdesc = api.chain.memoryBlock(blkHash) + receipts = blkdesc.receipts + txs = blkdesc.blk.transactions + else: + let receiptList = api.chain.db.getReceipts(header.receiptsRoot).valueOr: + return Opt.none(seq[ReceiptObject]) + for receipt in receiptList: + receipts.add receipt + txs = api.chain.db.getTransactions(header.txRoot).valueOr: + return Opt.none(seq[ReceiptObject]) + + try: + for receipt in receipts: + let gasUsed = receipt.cumulativeGasUsed - prevGasUsed + prevGasUsed = receipt.cumulativeGasUsed + recs.add populateReceipt(receipt, gasUsed, txs[index], index, header) + inc index + return Opt.some(recs) + except CatchableError: + return Opt.none(seq[ReceiptObject]) + + server.rpc("eth_createAccessList") do( + args: TransactionArgs, quantityTag: BlockTag + ) -> AccessListResult: + ## Generates an access list for a transaction. + try: + let header = api.headerFromTag(quantityTag).valueOr: + raise newException(ValueError, "Block not found") + return createAccessList(header, api.com, args) + except CatchableError as exc: + return AccessListResult(error: Opt.some("createAccessList error: " & exc.msg)) + + server.rpc("eth_blobBaseFee") do() -> Web3Quantity: + ## Returns the base fee per blob gas in wei. + let header = api.headerFromTag(blockId("latest")).valueOr: + raise newException(ValueError, "Block not found") + if header.blobGasUsed.isNone: + raise newException(ValueError, "blobGasUsed missing from latest header") + if header.excessBlobGas.isNone: + raise newException(ValueError, "excessBlobGas missing from latest header") + let blobBaseFee = + getBlobBaseFee(header.excessBlobGas.get) * header.blobGasUsed.get.u256 + if blobBaseFee > high(uint64).u256: + raise newException(ValueError, "blobBaseFee is bigger than uint64.max") + return w3Qty blobBaseFee.truncate(uint64) + + server.rpc("eth_getUncleByBlockHashAndIndex") do( + data: Hash32, quantity: Web3Quantity + ) -> BlockObject: + ## Returns information about a uncle of a block by hash and uncle index position. + ## + ## data: hash of block. + ## quantity: the uncle's index position. + ## Returns BlockObject or nil when no block was found. + let index = uint64(quantity) + let blk = api.chain.blockByHash(data).valueOr: + return nil + + if index < 0 or index >= blk.uncles.len.uint64: + return nil + + let + uncle = api.chain.blockByHash(blk.uncles[index].blockHash).valueOr: + return nil + uncleHash = uncle.header.blockHash + + return populateBlockObject( + uncleHash, uncle, api.getTotalDifficulty(uncleHash), false, true + ) + + server.rpc("eth_getUncleByBlockNumberAndIndex") do( + quantityTag: BlockTag, quantity: Web3Quantity + ) -> BlockObject: + # Returns information about a uncle of a block by number and uncle index position. + ## + ## quantityTag: a block number, or the string "earliest", "latest" or "pending", as in the default block parameter. + ## quantity: the uncle's index position. + ## Returns BlockObject or nil when no block was found. + let index = uint64(quantity) + let blk = api.blockFromTag(quantityTag).valueOr: + return nil + + if index < 0 or index >= blk.uncles.len.uint64: + return nil + + let + uncle = api.chain.blockByHash(blk.uncles[index].blockHash).valueOr: + return nil + uncleHash = uncle.header.blockHash + + return populateBlockObject( + uncleHash, uncle, api.getTotalDifficulty(uncleHash), false, true + ) diff --git a/nimbus/rpc/server_api_helpers.nim b/nimbus/rpc/server_api_helpers.nim deleted file mode 100644 index 055bc6609c..0000000000 --- a/nimbus/rpc/server_api_helpers.nim +++ /dev/null @@ -1,116 +0,0 @@ -# Nimbus -# Copyright (c) 2024 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) -# * MIT license ([LICENSE-MIT](LICENSE-MIT)) -# at your option. -# This file may not be copied, modified, or distributed except according to -# those terms. - -{.push raises: [].} - -import - eth/common/[eth_types, eth_types_rlp, transaction_utils], - web3/eth_api_types, - ../constants, - ../transaction - -from ../beacon/web3_eth_conv import w3Qty - -proc toWd(wd: eth_types.Withdrawal): WithdrawalObject = - WithdrawalObject( - index: Quantity(wd.index), - validatorIndex: Quantity wd.validatorIndex, - address: wd.address, - amount: Quantity wd.amount, - ) - -proc toWdList(list: openArray[eth_types.Withdrawal]): seq[WithdrawalObject] = - result = newSeqOfCap[WithdrawalObject](list.len) - for x in list: - result.add toWd(x) - -func toWdList(x: Opt[seq[eth_types.Withdrawal]]): - Opt[seq[WithdrawalObject]] = - if x.isNone: Opt.none(seq[WithdrawalObject]) - else: Opt.some(toWdList x.get) - -proc populateTransactionObject*(tx: Transaction, - optionalHash: Opt[eth_types.Hash32] = Opt.none(eth_types.Hash32), - optionalNumber: Opt[eth_types.BlockNumber] = Opt.none(eth_types.BlockNumber), - txIndex: Opt[uint64] = Opt.none(uint64)): TransactionObject = - result = TransactionObject() - result.`type` = Opt.some Quantity(tx.txType) - result.blockHash = optionalHash - result.blockNumber = w3Qty(optionalNumber) - - if (let sender = tx.recoverSender(); sender.isOk): - result.`from` = sender[] - result.gas = Quantity(tx.gasLimit) - result.gasPrice = Quantity(tx.gasPrice) - result.hash = tx.rlpHash - result.input = tx.payload - result.nonce = Quantity(tx.nonce) - result.to = Opt.some(tx.destination) - if txIndex.isSome: - result.transactionIndex = Opt.some(Quantity(txIndex.get)) - result.value = tx.value - result.v = Quantity(tx.V) - result.r = tx.R - result.s = tx.S - result.maxFeePerGas = Opt.some Quantity(tx.maxFeePerGas) - result.maxPriorityFeePerGas = Opt.some Quantity(tx.maxPriorityFeePerGas) - - if tx.txType >= TxEip2930: - result.chainId = Opt.some(Quantity(tx.chainId)) - result.accessList = Opt.some(tx.accessList) - - if tx.txType >= TxEIP4844: - result.maxFeePerBlobGas = Opt.some(tx.maxFeePerBlobGas) - result.blobVersionedHashes = Opt.some(tx.versionedHashes) - -proc populateBlockObject*(blockHash: eth_types.Hash32, - blk: Block, - fullTx: bool): BlockObject = - template header: auto = blk.header - - result = BlockObject() - result.number = Quantity(header.number) - result.hash = blockHash - result.parentHash = header.parentHash - result.nonce = Opt.some(header.nonce) - result.sha3Uncles = header.ommersHash - result.logsBloom = header.logsBloom - result.transactionsRoot = header.txRoot - result.stateRoot = header.stateRoot - result.receiptsRoot = header.receiptsRoot - result.miner = header.coinbase - result.difficulty = header.difficulty - result.extraData = HistoricExtraData header.extraData - result.mixHash = Hash32 header.mixHash - - # discard sizeof(seq[byte]) of extraData and use actual length - let size = sizeof(eth_types.Header) - sizeof(eth_api_types.Blob) + header.extraData.len - result.size = Quantity(size) - - result.gasLimit = Quantity(header.gasLimit) - result.gasUsed = Quantity(header.gasUsed) - result.timestamp = Quantity(header.timestamp) - result.baseFeePerGas = header.baseFeePerGas - - if fullTx: - for i, tx in blk.transactions: - let txObj = populateTransactionObject(tx, - Opt.some(blockHash), - Opt.some(header.number), Opt.some(i.uint64)) - result.transactions.add txOrHash(txObj) - else: - for i, tx in blk.transactions: - let txHash = rlpHash(tx) - result.transactions.add txOrHash(txHash) - - result.withdrawalsRoot = header.withdrawalsRoot - result.withdrawals = toWdList blk.withdrawals - result.parentBeaconBlockRoot = header.parentBeaconBlockRoot - result.blobGasUsed = w3Qty(header.blobGasUsed) - result.excessBlobGas = w3Qty(header.excessBlobGas) diff --git a/nimbus/sync/beacon.nim b/nimbus/sync/beacon.nim index 8de0125205..463ca070c4 100644 --- a/nimbus/sync/beacon.nim +++ b/nimbus/sync/beacon.nim @@ -18,7 +18,7 @@ import "."/[sync_desc, sync_sched, protocol] logScope: - topics = "beacon" + topics = "beacon sync" type BeaconSyncRef* = RunnerSyncRef[BeaconCtxData,BeaconBuddyData] @@ -28,25 +28,25 @@ type # ------------------------------------------------------------------------------ proc runSetup(ctx: BeaconCtxRef): bool = - worker.setup(ctx) + worker.setup(ctx, "RunSetup") proc runRelease(ctx: BeaconCtxRef) = - worker.release(ctx) + worker.release(ctx, "RunRelease") -proc runDaemon(ctx: BeaconCtxRef) {.async.} = - await worker.runDaemon(ctx) +proc runDaemon(ctx: BeaconCtxRef) {.async: (raises: []).} = + await worker.runDaemon(ctx, "RunDaemon") proc runStart(buddy: BeaconBuddyRef): bool = - worker.start(buddy) + worker.start(buddy, "RunStart") proc runStop(buddy: BeaconBuddyRef) = - worker.stop(buddy) + worker.stop(buddy, "RunStop") proc runPool(buddy: BeaconBuddyRef; last: bool; laps: int): bool = - worker.runPool(buddy, last, laps) + worker.runPool(buddy, last, laps, "RunPool") -proc runPeer(buddy: BeaconBuddyRef) {.async.} = - await worker.runPeer(buddy) +proc runPeer(buddy: BeaconBuddyRef) {.async: (raises: []).} = + await worker.runPeer(buddy, "RunPeer") # ------------------------------------------------------------------------------ # Public functions @@ -57,7 +57,7 @@ proc init*( ethNode: EthereumNode; chain: ForkedChainRef; maxPeers: int; - chunkSize: int; + chunkSize = 0; ): T = var desc = T() desc.initSync(ethNode, maxPeers) @@ -65,13 +65,7 @@ proc init*( desc.ctx.pool.chain = chain desc -proc start*(desc: BeaconSyncRef; resumeOnly = false): bool = - ## Start beacon sync. If `resumeOnly` is set `true` the syncer will only - ## start up if it can resume work, e.g. after being previously interrupted. - if resumeOnly: - desc.ctx.dbLoadSyncStateLayout() - if not desc.ctx.layout.headLocked: - return false +proc start*(desc: BeaconSyncRef): bool = desc.startSync() proc stop*(desc: BeaconSyncRef) = diff --git a/nimbus/sync/beacon/README.md b/nimbus/sync/beacon/README.md index 1ad9f777b8..6dbcab5eb9 100644 --- a/nimbus/sync/beacon/README.md +++ b/nimbus/sync/beacon/README.md @@ -1,169 +1,218 @@ Beacon Sync =========== -According to the merge-first -[glossary](https://notes.status.im/nimbus-merge-first-el?both=#Glossary), -a beacon sync is a "*Sync method that relies on devp2p and eth/6x to fetch -headers and bodies backwards then apply these in the forward direction to the -head state*". +Some definition of terms, and a suggestion of how a beacon sync can be encoded +providing pseudo code is provided by +[Beacon Sync](https://notes.status.im/nimbus-merge-first-el?both=#Beacon-sync). -This [glossary](https://notes.status.im/nimbus-merge-first-el?both=#Glossary) -is used as a naming template for relevant entities described here. When -referred to, names from the glossary are printed **bold**. +In the following, the data domain the Beacon Sync acts upon is explored and +presented. This leads to an implementation description without the help of +pseudo code but rather provides a definition of the sync and domain state +at critical moments. -Syncing blocks is performed in two overlapping phases +For handling block chain imports and related actions, abstraction methods +from the `forked_chain` module will be used (abbreviated **FC**.) The **FC** +entities **base** and **latest** from this module are always printed **bold**. -* loading header chains and stashing them into a separate database table, -* removing headers from the stashed headers chain, fetching the block bodies - the headers refer to and importing/executing them via `persistentBlocks()`. -So this beacon syncer slightly differs from the definition in the -[glossary](https://notes.status.im/nimbus-merge-first-el?both=#Glossary) in -that only headers are stashed on the database table and the block bodies are -fetched in the *forward* direction. +Sync Logic Outline +------------------ -The reason for that behavioural change is that the block bodies are addressed -by the hash of the block headers for fetching. They cannot be fully verified -upon arrival on the cheap (e.g. by a payload hash.) They will be validated not -before imported/executed. So potentially corrupt blocks will be discarded. -They will automatically be re-fetched with other missing blocks in the -*forward* direction. +Here is a simplification of the sync process intended to provide a mental +outline of how it works. +In the following block chain layouts, a left position always stands for an +ancestor of a right one. -Header chains -------------- + 0------C1 (1) -The header chains are the triple of + 0--------L1 (2) + \_______H1 -* a consecutively linked chain of headers starting starting at Genesis -* followed by a sequence of missing headers -* followed by a consecutively linked chain of headers ending up at a - finalised block header (earlier received from the consensus layer) + 0------------------C2 (3) -A sequence *@[h(1),h(2),..]* of block headers is called a *linked chain* if + 0--------------------L2 (4) + \________H2 -* block numbers join without gaps, i.e. *h(n).number+1 == h(n+1).number* -* parent hashes match, i.e. *h(n).hash == h(n+1).parentHash* +where + +* *0* is genesis +* *C1*, *C2* are the *latest* (aka cursor) entities from the **FC** module +* *L1*, *L2*, are updated *latest* entities from the **FC** module +* *H1*, *H2* are block headers (or blocks) that are used as sync targets -General header linked chains layout diagram +At stage *(1)*, there is a chain of imported blocks *[0,C1]* (written as +compact interval of block numbers.) - 0 C D H (1) - o----------------o---------------------o----------------o---> - | <-- linked --> | <-- unprocessed --> | <-- linked --> | +At stage *(2)*, there is a sync request to advance up until block *H1* which +is then fetched from the network along with its ancestors way back until there +is an ancestor within the chain of imported blocks *[0,L1]*. The chain *[0,L1]* +is what the *[0,C1]* has morphed into when the chain of blocks ending at *H1* +finds its ancestor. -Here, the single upper letter symbols *0*, *C*, *D*, *H* denote block numbers. -For convenience, these letters are also identified with its associated block -header or the full blocks. Saying *"the header 0"* is short for *"the header -with block number 0"*. +At stage *(3)* all blocks recently fetched have now been imported via **FC**. +In addition to that, there might have been additional imports from other +entities (e.g. `newPayload`) which has advanced *H1* further to *C2*. -Meaning of *0*, *C*, *D*, *H*: +Stage *(3)* has become similar to stage *(1)* with *C1* renamed as *C2*, ditto +for the symbols *L2* and *H2* for stage *(4)*. -* *0* -- Genesis, block number number *0* -* *C* -- coupler, maximal block number of linked chain starting at *0* -* *D* -- dangling, minimal block number of linked chain ending at *H* - with *C <= D* -* *H* -- head, end block number of **consensus head** (not necessarily the - latest one as this is moving while processing) -This definition implies *0 <= C <= D <= H* and the state of the header linked -chains can uniquely be described by the triple of block numbers *(C,D,H)*. +Implementation, The Gory Details +-------------------------------- +### Description of Sync State -### Storage of header chains: +The following diagram depicts a most general state view of the sync and the +*FC* modules and at a given point of time -Some block numbers from the closed interval (including end points) *[0,C]* may -correspond to finalised blocks, e.g. the sub-interval *[0,**base**]* where -**base** is the block number of the ledger state. The headers for -*[0,**base**]* are stored in the persistent state database. The headers for the -half open interval *(**base**,C]* are always stored on the *beaconHeader* -column of the *KVT* database. + 0 B L (5) + o---------o----------o + | <--- imported ---> | + C D H + o---------------------o----------------o + | <-- unprocessed --> | <-- linked --> | -The block numbers from the interval *[D,H]* also reside on the *beaconHeader* -column of the *KVT* database table. +where +* *B* -- **base**, current value of this entity (with the same name) of the + **FC** module (i.e. the current value when looked up.) -### Header linked chains initialisation: +* *C* -- coupler, parent of the left endpoint of the chain of headers or blocks + to be fetched and imported. The block number of *C* is somewhere + between the ones of *B* and *C* inclusive. -Minimal layout on a pristine system +* *L* -- **latest**, current value of this entity (with the same name) of the + **FC** module (i.e. the current value when looked up.) *L* need not + be a parent of any header of the linked chain `(C,H]` (see below for + notation). Both *L* and *H* might be heads of different forked chains. - 0 (2) - C - D - H - o---> +* *D* -- dangling, header with the least block number of the linked chain in + progress ending at *H*. This variable is used to record the download + state eventually reaching *Y* (for notation *D< - | <-- linked --> | + For blocks or headers A and B, writing A <- B stands for the block + A be parent of B (there can only be one parent of B.) -New layout with moving *D* and *H* to *T* + For blocks or headers A and B, A is said ancestor of, or equal to B + if A == B or there is a non-empty parent lineage A <- X <- Y <-..<- B. + Notation: A << B (note that << is an equivalence relation.) - D' (4) - 0 C H' - o----------------o---------------------o----> - | <-- linked --> | <-- unprocessed --> | + The compact interval notation [A,B] stands for the set {X|A< | -Block chain import/execution ------------------------------ +where *H<=L* (*H* needs only be known by its block number.) The state +parameters *C* and *D* are irrelevant here. -The following diagram with a partially imported/executed block chain amends the -layout *(1)*: +Following, there will be a request to advance *H* to a new position as +indicated in the diagram below - 0 B L C D H (5) - o------------o-----o-------o---------------------o----------------o--> - | <-- imported --> | | | | - | <------- linked ------> | <-- unprocessed --> | <-- linked --> | + 0 B (9) + o------------o-------o + | <--- imported ---> | D + C H + o--------------------------------------o + | <----------- unprocessed ----------> | -where +with a new sync state *(C,H,H)*. The parameter *B* is the **base** entity +of the **FC** module. The parameter *C* is a placeholder with *C ~ B*. The +parameter *D* is set to the download start position *H*. + +The syncer then fetches the header chain *(C,H]* from the network. While +iteratively fetching headers, the syncer state *(C,D,H)* will only change on +its second position *D* time after a new header was fetched. + +Having finished downloading then *C~D-1*. The sync state is *(D-1,D,H)*. One +will end up with a situation like + + 0 Y L (10) + o---------------o----o + | <--- imported ---> | + C Z H + o----o---------------------------------o + | <-------------- linked ------------> | + +for some *Y* in *[0,L]* and *Z* in *(C,H]* where *Y< | + Y H + o-----------------------------------o + | <------------ blocks -----------> | + +The blocks *(Y,H]* will then be imported and executed. While this happens, the +internal state of the **FC** might change/reset so that further import becomes +impossible. Even when starting import, the block *Y* might not be in *[0,L]* +anymore due to some internal reset of the **FC** logic. In any of those +cases, sync processing restarts at clause *(8)* by resetting the sync state. + +In case all blocks can be imported, one will will end up at + + 0 Y H L (12) + o-----------------o---------------------------------o---o + | <--- imported --------------------------------------> | + +with *H< | + # + # where `H << L` with `L` is the `latest` (aka cursor) parameter from + # `FC` the logic will be updated to (see clause *(9)* in `README.md`): + # + if h <= c or h <= l: # empty interval `(C,H]` or nothing to do + return idleSyncState + + # See clauses *(9)* and *(10)* in `README.md`: + # :: + # 0 B + # o---------------o----o + # | <--- imported ---> | + # C D H + # o---------------------o----------------o + # | <-- unprocessed --> | <-- linked --> | + # + # where *B* is the **base** entity of the `FC` module and `C` is sort of + # a placehoder with block number equal to *B* at some earlier time (the + # value *B* increases over time.) + # + # It is already known that `C < H` (see first check) + # + if c <= b: # check for `C <= B` as sketched above + + # Case `C < D-1` => not ready yet + if c + 1 < d: + return collectingHeaders + + # Case `C == D-1` => just finished the download + if c + 1 == d: + return finishedHeaders + + # Case `C == D` => see below for general case + + # Case `C == D` => set to import blocks (see *(10)* in `README.md`): + # :: + # 0 L + # o--------------------o + # | <--- imported ---> | + # D + # C H + # o--------------------------------o + # | <-- blocks to be completed --> | + # + # It is known already (see first check) that `L <`H` + # + if c == d: + return processingBlocks + + # Case `B < C` oops: + # :: + # 0 B + # o---------------o----o + # | <--- imported ---> | + # C D H + # o---------------------o----------------o + # | <-- unprocessed --> | <-- linked --> | + # + trace info & ": inconsistent state", + B=(if b == c: "C" else: b.bnStr), + C=(if c == l: "L" else: c.bnStr), + L=(if l == d: "D" else: l.bnStr), + D=(if d == h: "H" else: d.bnStr), + H=h.bnStr + + idleSyncState + +# ------------ + +proc startHibernating(ctx: BeaconCtxRef; info: static[string]) = + ## Clean up target bucket and await a new target. ## - ## Layout (see (3) in README): + ctx.sst.reset # => target.reset, layout.reset + ctx.headersUnprocClear() + ctx.blocksUnprocClear() + ctx.headersStagedQueueClear() + ctx.blocksStagedQueueClear() + ctx.dbHeadersClear() + + ctx.hibernate = true + + trace info & ": suspending syncer", L=ctx.chain.latestNumber.bnStr + + # Update, so it can be followed nicely + ctx.updateMetrics() + + +proc setupCollectingHeaders(ctx: BeaconCtxRef; info: static[string]) = + ## Set up sync target (see clause *(9)* in `README.md`) by modifying + ## layout to: ## :: - ## 0 C==D==H T - ## o----------------o---------------------o----> - ## | <-- linked --> | + ## 0 B + ## o------------o-------o + ## | <--- imported ---> | D + ## C H + ## o-----------------------------------o + ## | <--------- unprocessed ---------> | + ## + ## where *B* is the **base** entity of the `FC` module and `C ~ B`. The + ## parameter `H` is set to the new sync head target `T`. ## - ## or + let + c = ctx.chain.baseNumber() + h = ctx.target.consHead.number + + if c+1 < h: # header chain interval is `(C,H]` + doAssert ctx.headersUnprocTotal() == 0 + doAssert ctx.headersUnprocBorrowed() == 0 + doAssert ctx.headersStagedQueueIsEmpty() + doAssert ctx.blocksUnprocTotal() == 0 + doAssert ctx.blocksUnprocBorrowed() == 0 + doAssert ctx.blocksStagedQueueIsEmpty() + + ctx.sst.layout = SyncStateLayout( + coupler: c, + dangling: h, + final: ctx.target.final, + finalHash: ctx.target.finalHash, + head: h, + lastState: collectingHeaders) # state transition + + # Save this header on the database so it needs not be fetched again from + # somewhere else. + ctx.dbHeadersStash(h, @[rlp.encode(ctx.target.consHead)], info) + + # Save state + ctx.dbStoreSyncStateLayout info + + # Update range + ctx.headersUnprocSet(c+1, h-1) + + # Update, so it can be followed nicely + ctx.updateMetrics() + + # Mark target used, reset for re-fill + ctx.target.changed = false + + trace info & ": new header target", C=c.bnStr, D="H", H="T", T=h.bnStr + + +proc linkIntoFc(ctx: BeaconCtxRef; info: static[string]): bool = + ## Link `(C,H]` into the `FC` logic. If successful, `true` is returned. + ## Otherwise the chain `(C,H]` must be discarded. + ## + ## Condider the following layout (see clause *(10)* in `README.md`): ## :: - ## 0==T C==D==H - ## o----------------o--------------------------> - ## | <-- linked --> | + ## 0 B Y L + ## o-------------o--o----o + ## | <--- imported ----> | + ## C Z H + ## o----o--------------------------------o + ## | <------------- linked ------------> | ## - ## with `T == target.consHead.number` or `T == 0` + ## for some `Y` in `[B,L]` and `Z` in `(C,H]` where `Y<-Z` with `L` the + ## `latest` and `B` the `base` entity of the `FC` logic. ## - ## to be updated to + ## If there are such `Y <- Z`, then update the sync state to (see chause + ## *(11)* in `README.md`): ## :: - ## 0 C==D D'==H' - ## o----------------o---------------------o----> - ## | <-- linked --> | <-- unprocessed --> | + ## 0 Y + ## o----------------o----o + ## | <--- imported ----> | + ## D + ## C Z H + ## o-o------------------------------o + ## | <-- blocks to be completed --> | + ## + ## where `C==Y`, `(C,H]==[Z,H]`, `C<-Z` + ## + ## Otherwise, if *Z* does not exists then reset to idle state. ## - var target = ctx.target.consHead.number + let + b = ctx.chain.baseNumber() + l = ctx.chain.latestNumber() + c = ctx.layout.coupler + h = ctx.layout.head - # Need: `H < T` and `C == D` - if target != 0 and target <= ctx.layout.head: # violates `H < T` - trace info & ": not applicable", H=ctx.layout.head.bnStr, T=target.bnStr - return + if l < h: + # Try to find a parent in the `FC` data domain. For practical reasons the + # loop does not go further back than the base `B`. Blocks below/older than + # that will not be handled by the `FC`. + for bn in (l+1).countdown(max(b,c)): - if ctx.layout.coupler != ctx.layout.dangling: # violates `C == D` - trace info & ": not applicable", - C=ctx.layout.coupler.bnStr, D=ctx.layout.dangling.bnStr - return + # The syncer cache holds headers for `(C,H]`. It starts with checking + # whether `L<-Z` holds (i.e. `Y==L` can be chosen.) + let + yHash = ctx.dbHeaderParentHash(bn).expect "Hash32" # maybe `Y` + yHdr = ctx.chain.headerByHash(yHash).valueOr: continue # test for `Y` + yNum = yHdr.number # == bn-1 - # Check consistency: `C == D <= H` for maximal `C` => `D == H` - doAssert ctx.layout.dangling == ctx.layout.head + ctx.layout.coupler = yNum # parent of `Z` + ctx.layout.dangling = yNum # .. ditto - let rlpHeader = rlp.encode(ctx.target.consHead) + trace info & ": linked into FC", B=b.bnStr, + C=(if yNum==l: "L" else: yNum.bnStr), L=l.bnStr, H=h.bnStr - ctx.sst.layout = SyncStateLayout( - coupler: ctx.layout.coupler, - couplerHash: ctx.layout.couplerHash, - dangling: target, - danglingParent: ctx.target.consHead.parentHash, - final: ctx.target.final, - finalHash: ctx.target.finalHash, - head: target, - headHash: rlpHeader.keccak256, - headLocked: true) + # Save layout state + ctx.dbStoreSyncStateLayout info - # Save this header on the database so it needs not be fetched again from - # somewhere else. - ctx.dbStashHeaders(target, @[rlpHeader]) + # Update, so it can be followed nicely + ctx.updateMetrics() + return true - # Save state - ctx.dbStoreSyncStateLayout() + trace info & ": cannot link into FC", B=b.bnStr, L=l.bnStr, + C=c.bnStr, H=h.bnStr + false - # Update range + +proc setupProcessingBlocks(ctx: BeaconCtxRef; info: static[string]) = doAssert ctx.headersUnprocTotal() == 0 doAssert ctx.headersUnprocBorrowed() == 0 doAssert ctx.headersStagedQueueIsEmpty() - ctx.headersUnprocSet(ctx.layout.coupler+1, ctx.layout.dangling-1) + doAssert ctx.blocksUnprocTotal() == 0 + doAssert ctx.blocksUnprocBorrowed() == 0 + doAssert ctx.blocksStagedQueueIsEmpty() - trace info & ": updated", C=ctx.layout.coupler.bnStr, - uTop=ctx.headersUnprocTop(), - D=ctx.layout.dangling.bnStr, H=ctx.layout.head.bnStr, T=target.bnStr + let + c = ctx.layout.coupler + h = ctx.layout.head + # Update blocks `(C,H]` + ctx.blocksUnprocCommit(0, c+1, h) -proc mergeAdjacentChains(ctx: BeaconCtxRef; info: static[string]) = - ## Merge if `C+1` == `D` - ## - if ctx.layout.coupler+1 < ctx.layout.dangling or # gap btw. `C` & `D` - ctx.layout.coupler == ctx.layout.dangling: # merged already - return + # State transition + ctx.layout.lastState = processingBlocks - # No overlap allowed! - doAssert ctx.layout.coupler+1 == ctx.layout.dangling - - # Verify adjacent chains - if ctx.layout.couplerHash != ctx.layout.danglingParent: - # FIXME: Oops -- any better idea than to defect? - raiseAssert info & ": hashes do not match" & - " C=" & ctx.layout.coupler.bnStr & " D=" & $ctx.layout.dangling.bnStr - - trace info & ": merging", C=ctx.layout.coupler.bnStr, - D=ctx.layout.dangling.bnStr - - # Merge adjacent linked chains - ctx.sst.layout = SyncStateLayout( - coupler: ctx.layout.head, # `C` - couplerHash: ctx.layout.headHash, - dangling: ctx.layout.head, # `D` - danglingParent: ctx.dbPeekParentHash(ctx.layout.head).expect "Hash32", - final: ctx.layout.final, # `F` - finalHash: ctx.layout.finalHash, - head: ctx.layout.head, # `H` - headHash: ctx.layout.headHash, - headLocked: ctx.layout.headLocked) - - # Save state - ctx.dbStoreSyncStateLayout() + trace info & ": collecting block bodies", iv=BnRange.new(c+1, h) # ------------------------------------------------------------------------------ # Public functions # ------------------------------------------------------------------------------ -proc updateSyncStateLayout*(ctx: BeaconCtxRef; info: static[string]) = - ## Update layout - - # Check whether the target has been reached. In that case, unlock the - # consensus head `H` from the current layout so that it can be updated - # in time. - if ctx.layout.headLocked: - # So we have a session - let latest= ctx.chain.latestNumber() - if ctx.layout.head <= latest: - doAssert ctx.layout.head == latest - ctx.layout.headLocked = false - - # Check whether there is something to do regarding beacon node change - if not ctx.layout.headLocked and ctx.target.changed and ctx.target.final != 0: - ctx.target.changed = false - ctx.updateTargetChange info - - # Check whether header downloading is done - ctx.mergeAdjacentChains info - - -proc updateBlockRequests*(ctx: BeaconCtxRef; info: static[string]) = - ## Update block requests if there staged block queue is empty - let latest = ctx.chain.latestNumber() - if latest < ctx.layout.coupler: # so half open interval `(L,C]` is not empty - - # One can fill/import/execute blocks by number from `(L,C]` - if ctx.blk.topRequest < ctx.layout.coupler: - # So there is some space - trace info & ": updating", L=latest.bnStr, - topReq=ctx.blk.topRequest.bnStr, C=ctx.layout.coupler.bnStr +proc updateSyncState*(ctx: BeaconCtxRef; info: static[string]) = + ## Update internal state when needed + let + prevState = ctx.layout.lastState # previous state + thisState = ctx.syncState info # currently observed state + + if thisState == prevState: + # Check whether the system has been idle and a new header download + # session can be set up + if prevState == idleSyncState and + ctx.target.changed and # and there is a new target from CL + ctx.target.final != 0: # .. ditto + ctx.setupCollectingHeaders info # set up new header sync + return + # Notreached + + trace info & ": sync state changed", prevState, thisState, + L=ctx.chain.latestNumber.bnStr, + C=(if ctx.layout.coupler == ctx.layout.dangling: "D" + else: ctx.layout.coupler.bnStr), + D=(if ctx.layout.dangling == ctx.layout.head: "H" + else: ctx.layout.dangling.bnStr), + H=ctx.layout.head.bnStr + + # So there is a states transition. The only relevant transition here + # is `collectingHeaders -> finishedHeaders` which will be continued + # as `finishedHeaders -> processingBlocks`. + # + if prevState == collectingHeaders and + thisState == finishedHeaders and + ctx.linkIntoFc(info): # commit downloading headers + ctx.setupProcessingBlocks info # start downloading block bodies + trace info & ": sync state changed", + prevState=thisState, thisState=ctx.syncState(info) + return + # Notreached - ctx.blocksUnprocCommit( - 0, max(latest, ctx.blk.topRequest) + 1, ctx.layout.coupler) - ctx.blk.topRequest = ctx.layout.coupler + # Final sync target reached or inconsistent/impossible state + ctx.startHibernating info -proc updateMetrics*(ctx: BeaconCtxRef) = - let now = Moment.now() - if ctx.pool.nextUpdate < now: - ctx.updateMetricsImpl() - ctx.pool.nextUpdate = now + metricsUpdateInterval +proc updateFinalBlockHeader*( + ctx: BeaconCtxRef; + finHdr: Header; + finHash: Hash32; + info: static[string]; + ) = + ## Update the finalised header cache. If the finalised header is acceptable, + ## the syncer will be activated from hibernation if necessary. + ## + let + b = ctx.chain.baseNumber() + f = finHdr.number + if f < b: + trace info & ": finalised block # too low", + B=b.bnStr, finalised=f.bnStr, delta=(b - f) + + ctx.target.reset + + else: + ctx.target.final = f + ctx.target.finalHash = finHash + + # Activate running (unless done yet) + if ctx.hibernate: + ctx.hibernate = false + trace info & ": activating syncer", B=b.bnStr, + finalised=f.bnStr, head=ctx.target.consHead.bnStr + + # Update, so it can be followed nicely + ctx.updateMetrics() # ------------------------------------------------------------------------------ # End diff --git a/nimbus/sync/beacon/worker/update/metrics.nim b/nimbus/sync/beacon/worker/update/metrics.nim index 5012defadd..48c0160876 100644 --- a/nimbus/sync/beacon/worker/update/metrics.nim +++ b/nimbus/sync/beacon/worker/update/metrics.nim @@ -11,10 +11,12 @@ {.push raises:[].} import - pkg/metrics, + pkg/[chronos, metrics], ../../../../core/chain, ../../worker_desc, - ".."/[blocks_staged, headers_staged] + ../blocks_staged/staged_queue, + ../headers_staged/staged_queue, + ".."/[blocks_unproc, headers_unproc] declareGauge beacon_base, "" & "Max block number of imported finalised blocks" @@ -28,9 +30,6 @@ declareGauge beacon_coupler, "" & declareGauge beacon_dangling, "" & "Starting/min block number for higher up headers chain" -declareGauge beacon_final, "" & - "Max number of finalised block in higher up headers chain" - declareGauge beacon_head, "" & "Ending/max block number of higher up headers chain" @@ -55,12 +54,11 @@ declareGauge beacon_buddies, "" & "Number of currently active worker instances" -template updateMetricsImpl*(ctx: BeaconCtxRef) = +template updateMetricsImpl(ctx: BeaconCtxRef) = metrics.set(beacon_base, ctx.chain.baseNumber().int64) metrics.set(beacon_latest, ctx.chain.latestNumber().int64) metrics.set(beacon_coupler, ctx.layout.coupler.int64) metrics.set(beacon_dangling, ctx.layout.dangling.int64) - metrics.set(beacon_final, ctx.layout.final.int64) metrics.set(beacon_head, ctx.layout.head.int64) metrics.set(beacon_target, ctx.target.consHead.number.int64) @@ -74,4 +72,12 @@ template updateMetricsImpl*(ctx: BeaconCtxRef) = metrics.set(beacon_buddies, ctx.pool.nBuddies) +# --------------- + +proc updateMetrics*(ctx: BeaconCtxRef) = + let now = Moment.now() + if ctx.pool.nextUpdate < now: + ctx.updateMetricsImpl() + ctx.pool.nextUpdate = now + metricsUpdateInterval + # End diff --git a/nimbus/sync/beacon/worker_config.nim b/nimbus/sync/beacon/worker_config.nim index 6a616332cd..f41029e65f 100644 --- a/nimbus/sync/beacon/worker_config.nim +++ b/nimbus/sync/beacon/worker_config.nim @@ -14,7 +14,7 @@ import pkg/chronos const - enableTicker* = true + enableTicker* = false ## Log regular status updates similar to metrics. Great for debugging. runsThisManyPeersOnly* = 8 @@ -43,7 +43,7 @@ const workerIdleWaitInterval* = chronos.seconds(10) ## Sleep some time in multi-mode if there is nothing to do - asyncThreadSwitchTimeSlot* = chronos.nanoseconds(10) + asyncThreadSwitchTimeSlot* = chronos.nanoseconds(1) ## Nano-sleep to allows pseudo/async thread switch # ---------------------- @@ -92,8 +92,8 @@ const nFetchBodiesRequest* = 128 ## Similar to `nFetchHeadersRequest` - fetchBodiesReqThresholdZombie* = chronos.seconds(2) - fetchBodiesReqThresholdCount* = 3 + fetchBodiesReqThresholdZombie* = chronos.seconds(4) + fetchBodiesReqThresholdCount* = 5 ## Similar to `fetchHeadersReqThreshold*` fetchBodiesReqMinResponsePC* = 10 diff --git a/nimbus/sync/beacon/worker_desc.nim b/nimbus/sync/beacon/worker_desc.nim index d462f107a2..5f23ab8e7d 100644 --- a/nimbus/sync/beacon/worker_desc.nim +++ b/nimbus/sync/beacon/worker_desc.nim @@ -52,8 +52,24 @@ type ## Block request item sorted by least block number (i.e. from `blocks[0]`.) blocks*: seq[EthBlock] ## List of blocks for import + KvtCache* = Table[BlockNumber,seq[byte]] + ## This cache type is intended for holding block headers that cannot be + ## reliably saved persistently. This is the situation after blocks are + ## imported as the FCU handlers always maintain a positive transaction + ## level and in some instances the current transaction is flushed and + ## re-opened. + ## + ## The number of block headers to hold in memory after block import has + ## started is the distance to the new `canonical execution head`. + # ------------------- + SyncLayoutState* = enum + idleSyncState = 0 ## see clause *(8)*, *(12)* of `README.md` + collectingHeaders ## see clauses *(5)*, *(9)* of `README.md` + finishedHeaders ## see clause *(10)* of `README.md` + processingBlocks ## see clause *(11)* of `README.md` + SyncStateTarget* = object ## Beacon state to be implicitely updated by RPC method locked*: bool ## Don't update while fetching header @@ -64,36 +80,33 @@ type SyncStateLayout* = object ## Layout of a linked header chains defined by the triple `(C,D,H)` as - ## described in the `README.md` text. + ## described in clause *(5)* of the `README.md` text. ## :: - ## 0 B L C D F H - ## o----------o-----o-------o---------------------o------------o---o---> - ## | <- imported -> | | | | - ## | <------ linked ------> | <-- unprocessed --> | <-- linked --> | + ## 0 B L + ## o---------o----------o + ## | <--- imported ---> | + ## C D H + ## o---------------------o----------------o + ## | <-- unprocessed --> | <-- linked --> | ## ## Additional positions known but not declared in this descriptor: - ## * `B`: base state (from `forked_chain` importer) - ## * `L`: last imported block, canonical consensus head - ## * `F`: finalised head (from CL) + ## * `B`: `base` parameter from `FC` logic + ## * `L`: `latest` (aka cursor) parameter from `FC` logic ## - coupler*: BlockNumber ## Right end `C` of linked chain `[0,C]` - couplerHash*: Hash32 ## Hash of `C` - + coupler*: BlockNumber ## Bottom end `C` of full chain `(C,H]` dangling*: BlockNumber ## Left end `D` of linked chain `[D,H]` - danglingParent*: Hash32 ## Parent hash of `D` + head*: BlockNumber ## `H`, block num of some finalised block + lastState*: SyncLayoutState ## Last known layout state + # Legacy entries, will be removed some time. This is currently needed + # for importing blocks into `FC` the support of which will be deprecated. final*: BlockNumber ## Finalised block number `F` finalHash*: Hash32 ## Hash of `F` - head*: BlockNumber ## `H`, block num of some finalised block - headHash*: Hash32 ## Hash of `H` - headLocked*: bool ## No need to update `H` yet - SyncState* = object ## Sync state for header and block chains target*: SyncStateTarget ## Consensus head, see `T` in `README.md` layout*: SyncStateLayout ## Current header chains layout - lastLayout*: SyncStateLayout ## Previous layout (for delta update) # ------------------- @@ -107,7 +120,6 @@ type ## Block sync staging area unprocessed*: BnRangeSet ## Blocks download requested borrowed*: uint64 ## Total of temp. fetched ranges - topRequest*: BlockNumber ## Max requested block number staged*: StagedBlocksQueue ## Blocks ready for import # ------------------- @@ -133,12 +145,13 @@ type # Blocks import/execution settings for importing with # `nBodiesBatch` blocks in each round (minimum value is # `nFetchBodiesRequest`.) - chain*: ForkedChainRef ## Database - importRunningOk*: bool ## Advisory lock, fetch vs. import + chain*: ForkedChainRef ## Core database, FCU support + stash*: KvtCache ## Temporary header and state table + blockImportOk*: bool ## Don't fetch data while block importing nBodiesBatch*: int ## Default `nFetchBodiesBatchDefault` blocksStagedQuLenMax*: int ## Default `blocksStagedQueueLenMaxDefault` - # Info stuff, no functional contribution + # Info & debugging stuff, no functional contribution nReorg*: int ## Number of reorg invocations (info only) # Debugging stuff @@ -179,10 +192,30 @@ func chain*(ctx: BeaconCtxRef): ForkedChainRef = ## Getter ctx.pool.chain +func stash*(ctx: BeaconCtxRef): var KvtCache = + ## Getter + ctx.pool.stash + func db*(ctx: BeaconCtxRef): CoreDbRef = ## Getter ctx.pool.chain.db +# ----- + +func hibernate*(ctx: BeaconCtxRef): bool = + ## Getter, re-interpretation of the daemon flag for reduced service mode + # No need for running the daemon with reduced service mode. So it is + # convenient to use this flag for indicating this. + not ctx.daemon + +proc `hibernate=`*(ctx: BeaconCtxRef; val: bool) = + ## Setter + ctx.daemon = not val + + # Control some error messages on the scheduler (e.g. zombie/banned-peer + # reconnection attempts, LRU flushing out oldest peer etc.) + ctx.noisyLog = not val + # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/handlers/eth.nim b/nimbus/sync/handlers/eth.nim index d863cfd80d..c3e8a22351 100644 --- a/nimbus/sync/handlers/eth.nim +++ b/nimbus/sync/handlers/eth.nim @@ -58,27 +58,33 @@ proc notImplemented(name: string) {.used.} = proc successorHeader(db: CoreDbRef, h: Header, - output: var Header, - skip = 0'u): bool = + skip = 0'u): Opt[Header] = let offset = 1 + skip.BlockNumber if h.number <= (not 0.BlockNumber) - offset: - result = db.getBlockHeader(h.number + offset, output) + let header = db.getBlockHeader(h.number + offset).valueOr: + return Opt.none(Header) + return Opt.some(header) + Opt.none(Header) proc ancestorHeader(db: CoreDbRef, h: Header, - output: var Header, - skip = 0'u): bool = + skip = 0'u): Opt[Header] = let offset = 1 + skip.BlockNumber if h.number >= offset: - result = db.getBlockHeader(h.number - offset, output) + let header = db.getBlockHeader(h.number - offset).valueOr: + return Opt.none(Header) + return Opt.some(header) + Opt.none(Header) proc blockHeader(db: CoreDbRef, - b: BlockHashOrNumber, - output: var Header): bool = - if b.isHash: - db.getBlockHeader(b.hash, output) - else: - db.getBlockHeader(b.number, output) + b: BlockHashOrNumber): Opt[Header] = + let header = if b.isHash: + db.getBlockHeader(b.hash).valueOr: + return Opt.none(Header) + else: + db.getBlockHeader(b.number).valueOr: + return Opt.none(Header) + Opt.some(header) # ------------------------------------------------------------------------------ # Private functions: peers related functions @@ -292,45 +298,37 @@ proc new*(_: type EthWireRef, method getStatus*(ctx: EthWireRef): Result[EthState, string] {.gcsafe.} = - try: - let - db = ctx.db - com = ctx.chain.com - bestBlock = db.getCanonicalHead() - forkId = com.forkId(bestBlock.number, bestBlock.timestamp) - - return ok(EthState( - totalDifficulty: db.headTotalDifficulty, - genesisHash: com.genesisHash, - bestBlockHash: bestBlock.blockHash, - forkId: ChainForkId( - forkHash: forkId.crc.toBytesBE, - forkNext: forkId.nextFork - ) - )) - except EVMError as exc: - # TODO: Why an EVM Error in database? - return err(exc.msg) - except RlpError as exc: - return err(exc.msg) + let + db = ctx.db + com = ctx.chain.com + bestBlock = ?db.getCanonicalHead() + forkId = com.forkId(bestBlock.number, bestBlock.timestamp) + + return ok(EthState( + totalDifficulty: db.headTotalDifficulty, + genesisHash: com.genesisHash, + bestBlockHash: bestBlock.blockHash, + forkId: ChainForkId( + forkHash: forkId.crc.toBytesBE, + forkNext: forkId.nextFork + ) + )) method getReceipts*(ctx: EthWireRef, hashes: openArray[Hash32]): Result[seq[seq[Receipt]], string] {.gcsafe.} = - try: - let db = ctx.db - var header: Header - var list: seq[seq[Receipt]] - for blockHash in hashes: - if db.getBlockHeader(blockHash, header): - list.add db.getReceipts(header.receiptsRoot) - else: - list.add @[] - trace "handlers.getReceipts: blockHeader not found", blockHash - return ok(list) - except RlpError as exc: - return err(exc.msg) + let db = ctx.db + var list: seq[seq[Receipt]] + for blockHash in hashes: + let header = db.getBlockHeader(blockHash).valueOr: + list.add @[] + trace "handlers.getReceipts: blockHeader not found", blockHash + continue + let receiptList = ?db.getReceipts(header.receiptsRoot) + list.add receiptList + + return ok(list) method getPooledTxs*(ctx: EthWireRef, hashes: openArray[Hash32]): @@ -356,39 +354,35 @@ method getBlockBodies*(ctx: EthWireRef, Result[seq[BlockBody], string] {.gcsafe.} = let db = ctx.db - var body: BlockBody var list: seq[BlockBody] for blockHash in hashes: - if db.getBlockBody(blockHash, body): - list.add body - else: + let body = db.getBlockBody(blockHash).valueOr: list.add BlockBody() trace "handlers.getBlockBodies: blockBody not found", blockHash + continue + list.add body + return ok(list) method getBlockHeaders*(ctx: EthWireRef, req: EthBlocksRequest): Result[seq[Header], string] {.gcsafe.} = - try: - let db = ctx.db - var foundBlock: Header - var list = newSeqOfCap[Header](req.maxResults) - - if db.blockHeader(req.startBlock, foundBlock): - list.add foundBlock - - while uint64(list.len) < req.maxResults: - if not req.reverse: - if not db.successorHeader(foundBlock, foundBlock, req.skip): - break - else: - if not db.ancestorHeader(foundBlock, foundBlock, req.skip): - break - list.add foundBlock + let db = ctx.db + var list = newSeqOfCap[Header](req.maxResults) + var foundBlock = db.blockHeader(req.startBlock).valueOr: return ok(list) - except RlpError as exc: - return err(exc.msg) + list.add foundBlock + + while uint64(list.len) < req.maxResults: + if not req.reverse: + foundBlock = db.successorHeader(foundBlock, req.skip).valueOr: + break + else: + foundBlock = db.ancestorHeader(foundBlock, req.skip).valueOr: + break + list.add foundBlock + return ok(list) method handleAnnouncedTxs*(ctx: EthWireRef, peer: Peer, diff --git a/nimbus/sync/protocol/eth68.nim b/nimbus/sync/protocol/eth68.nim index 4163642d68..a4722fbb32 100644 --- a/nimbus/sync/protocol/eth68.nim +++ b/nimbus/sync/protocol/eth68.nim @@ -12,9 +12,6 @@ ## This module implements Ethereum Wire Protocol version 67, `eth/67`. ## Specification: ## `eth/68 `_ -## -## Use NIM command line optipn `-d:p2pProtocolDebug` for dumping the -## generated driver code (just to have it stored somewhere lest one forgets.) import stint, diff --git a/nimbus/sync/protocol/snap1.nim b/nimbus/sync/protocol/snap1.nim index 4ca15c6381..9d5925910e 100644 --- a/nimbus/sync/protocol/snap1.nim +++ b/nimbus/sync/protocol/snap1.nim @@ -12,9 +12,6 @@ ## This module implements Ethereum Snapshot Protocol version 1, `snap/1`. ## Specification: ## `snap/1 `_ -## -## Use NIM command line optipn `-d:p2pProtocolDebug` for dumping the -## generated driver code (just to have it stored somewhere lest one forgets.) import std/options, diff --git a/nimbus/sync/sync_desc.nim b/nimbus/sync/sync_desc.nim index 8cae2da1b5..c1c179b3d1 100644 --- a/nimbus/sync/sync_desc.nim +++ b/nimbus/sync/sync_desc.nim @@ -38,7 +38,7 @@ type CtxRef*[S] = ref object ## Shared state among all syncing peer workers (aka buddies.) - buddiesMax*: int ## Max number of buddies + noisyLog*: bool ## Hold back `trace` and `debug` msgs if `false` poolMode*: bool ## Activate `runPool()` workers if set `true` daemon*: bool ## Enable global background job pool*: S ## Shared context for all worker peers diff --git a/nimbus/sync/sync_sched.nim b/nimbus/sync/sync_sched.nim index a51e733a4d..c6eb0cc5df 100644 --- a/nimbus/sync/sync_sched.nim +++ b/nimbus/sync/sync_sched.nim @@ -23,7 +23,7 @@ ## *runRelease(ctx: CtxRef[S])* ## Global clean up, done with all the worker peers. ## -## *runDaemon(ctx: CtxRef[S]) {.async.}* +## *runDaemon(ctx: CtxRef[S]) {.async: (raises: []).}* ## Global background job that will be re-started as long as the variable ## `ctx.daemon` is set `true`. If that job was stopped due to re-setting ## `ctx.daemon` to `false`, it will be restarted next after it was reset @@ -56,13 +56,23 @@ ## Note that this function does *not* run in `async` mode. ## ## -## *runPeer(buddy: BuddyRef[S,W]) {.async.}* +## *runPeer(buddy: BuddyRef[S,W]) {.async: (raises: []).}* ## This peer worker method is repeatedly invoked (exactly one per peer) while ## the `buddy.ctrl.poolMode` flag is set `false`. ## ## These peer worker methods run concurrently in `async` mode. ## ## +## These are the control variables that can be set from within the above +## listed method/interface functions. +## +## *buddy.ctx.poolMode* +## Activate `runPool()` workers loop if set `true` (default is `false`.) +## +## *buddy.ctx.daemon* +## Activate `runDaemon()` background job if set `true`(default is `false`.) +## +## ## Additional import files needed when using this template: ## * eth/[common, p2p] ## * chronicles @@ -84,21 +94,28 @@ type ## List of active workers, using `Hash(Peer)` rather than `Peer` KeyedQueue[ENode,RunnerBuddyRef[S,W]] + RunCtrl = enum + terminated = 0 + shutdown + running + RunnerSyncRef*[S,W] = ref object ## Module descriptor ctx*: CtxRef[S] ## Shared data pool: PeerPool ## For starting the system + buddiesMax: int ## Max number of buddies buddies: ActiveBuddies[S,W] ## LRU cache with worker descriptors - daemonRunning: bool ## Run global background job - monitorLock: bool ## Monitor mode is activated - activeMulti: int ## Number of activated runners in multi-mode - shutdown: bool ## Internal shut down flag + daemonRunning: bool ## Running background job (in async mode) + monitorLock: bool ## Monitor mode is activated (non-async mode) + activeMulti: int ## Number of async workers active/running + runCtrl: RunCtrl ## Start/stop control RunnerBuddyRef[S,W] = ref object ## Per worker peer descriptor dsc: RunnerSyncRef[S,W] ## Scheduler descriptor worker: BuddyRef[S,W] ## Worker peer data - zombified: Moment ## When it became undead (if any) + zombified: Moment ## Time when it became undead (if any) + isRunning: bool ## Peer worker is active (in async mode) const zombieTimeToLinger = 20.seconds @@ -119,6 +136,9 @@ const execPoolModeLoopMax = 100 ## Avoids continuous looping + termWaitPollingTime = 10.milliseconds + ## Wait for instance to have terminated for shutdown + # ------------------------------------------------------------------------------ # Private helpers # ------------------------------------------------------------------------------ @@ -140,10 +160,53 @@ proc key(peer: Peer): ENode = # Private functions # ------------------------------------------------------------------------------ -proc daemonLoop[S,W](dsc: RunnerSyncRef[S,W]) {.async.} = +proc terminate[S,W](dsc: RunnerSyncRef[S,W]) = + ## Request termination and wait for sub-tasks to finish + mixin runRelease + + if dsc.runCtrl == running: + # Gracefully shut down async services + dsc.runCtrl = shutdown + dsc.ctx.daemon = false + + # Wait for workers and daemon to have terminated + while 0 < dsc.buddies.len: + for w in dsc.buddies.nextPairs: + if w.data.isRunning: + w.data.worker.ctrl.stopped = true + # Activate async job so it can finish + try: + waitFor sleepAsync termWaitPollingTime + except CancelledError: + trace "Shutdown: peer timeout was cancelled", + peer=w.data.worker.peer, nWorkers=dsc.buddies.len + else: + dsc.buddies.del w.key # this is OK to delete + + while dsc.daemonRunning: + # Activate async job so it can finish + try: + waitFor sleepAsync termWaitPollingTime + except CancelledError: + trace "Shutdown: daemon timeout was cancelled", nWorkers=dsc.buddies.len + + # Final shutdown + dsc.ctx.runRelease() + + # Remove call back from pool manager. This comes last as it will + # potentially unlink references which are used in the worker instances + # (e.g. peer for logging.) + dsc.pool.delObserver(dsc) + + # Clean up, free memory from sub-objects + dsc.ctx = CtxRef[S]() + dsc.runCtrl = terminated + + +proc daemonLoop[S,W](dsc: RunnerSyncRef[S,W]) {.async: (raises: []).} = mixin runDaemon - if dsc.ctx.daemon and not dsc.shutdown: + if dsc.ctx.daemon and dsc.runCtrl == running: dsc.daemonRunning = true # Continue until stopped @@ -162,13 +225,20 @@ proc daemonLoop[S,W](dsc: RunnerSyncRef[S,W]) {.async.} = elapsed = Moment.now() - startMoment suspend = if execLoopTimeElapsedMin <= elapsed: execLoopTaskSwitcher else: execLoopTimeElapsedMin - elapsed - await sleepAsync suspend + try: + await sleepAsync suspend + except CancelledError: + # Stop on error (must not end up in busy-loop). If the activation flag + # `dsc.ctx.daemon` remains `true`, the deamon will be re-started from + # the worker loop in due time. + trace "Deamon loop timeout was cancelled", nWorkers=dsc.buddies.len + break # End while dsc.daemonRunning = false -proc workerLoop[S,W](buddy: RunnerBuddyRef[S,W]) {.async.} = +proc workerLoop[S,W](buddy: RunnerBuddyRef[S,W]) {.async: (raises: []).} = mixin runPeer, runPool, runStop let dsc = buddy.dsc @@ -178,7 +248,15 @@ proc workerLoop[S,W](buddy: RunnerBuddyRef[S,W]) {.async.} = # Continue until stopped block taskExecLoop: - while worker.ctrl.running and not dsc.shutdown: + buddy.isRunning = true + + proc isShutdown(): bool = + dsc.runCtrl != running + + proc isActive(): bool = + worker.ctrl.running and not isShutdown() + + while isActive(): # Enforce minimum time spend on this loop let startMoment = Moment.now() @@ -191,8 +269,13 @@ proc workerLoop[S,W](buddy: RunnerBuddyRef[S,W]) {.async.} = # clear to run as the only activated instance. dsc.monitorLock = true while 0 < dsc.activeMulti: - await sleepAsync execLoopPollingTime - if worker.ctrl.stopped: + try: + await sleepAsync execLoopPollingTime + except CancelledError: + # must not end up in busy-loop + dsc.monitorLock = false + break taskExecLoop + if not isActive(): dsc.monitorLock = false break taskExecLoop @@ -209,6 +292,10 @@ proc workerLoop[S,W](buddy: RunnerBuddyRef[S,W]) {.async.} = else: delayed = nil # not executing any final item break # `true` => stop + # Shutdown in progress? + if isShutdown(): + dsc.monitorLock = false + break taskExecLoop if not delayed.isNil: discard delayed.runPool(last=true, laps=count) # final item if not ctx.poolMode: @@ -221,17 +308,22 @@ proc workerLoop[S,W](buddy: RunnerBuddyRef[S,W]) {.async.} = # end. So zombies will end up leftish. discard dsc.buddies.lruFetch peer.key - # Peer mode + # Peer worker in async mode dsc.activeMulti.inc # Continue doing something, work a bit await worker.runPeer() dsc.activeMulti.dec + # Check for shutdown + if isShutdown(): + worker.ctrl.stopped = true + break taskExecLoop + # Dispatch daemon sevice if needed if not dsc.daemonRunning and dsc.ctx.daemon: asyncSpawn dsc.daemonLoop() - # Check for termination + # Check for worker termination if worker.ctrl.stopped: break taskExecLoop @@ -241,21 +333,28 @@ proc workerLoop[S,W](buddy: RunnerBuddyRef[S,W]) {.async.} = elapsed = Moment.now() - startMoment suspend = if execLoopTimeElapsedMin <= elapsed: execLoopTaskSwitcher else: execLoopTimeElapsedMin - elapsed - await sleepAsync suspend + try: + await sleepAsync suspend + except CancelledError: + trace "Peer loop timeout was cancelled", peer, nWorkers=dsc.buddies.len + break # stop on error (must not end up in busy-loop) # End while # Note that `runStart()` was dispatched in `onPeerConnected()` - if worker.ctrl.running: - # So shutdown was called - worker.ctrl.stopped = true worker.runStop() + buddy.isRunning = false proc onPeerConnected[S,W](dsc: RunnerSyncRef[S,W]; peer: Peer) = mixin runStart, runStop + + # Ignore if shutdown is processing + if dsc.runCtrl != running: + return + # Check for known entry (which should not exist.) let - maxWorkers {.used.} = dsc.ctx.buddiesMax + maxWorkers {.used.} = dsc.buddiesMax nPeers {.used.} = dsc.pool.len zombie = dsc.buddies.eq peer.key if zombie.isOk: @@ -263,12 +362,12 @@ proc onPeerConnected[S,W](dsc: RunnerSyncRef[S,W]; peer: Peer) = now = Moment.now() ttz = zombie.value.zombified + zombieTimeToLinger if ttz < Moment.now(): - trace "Reconnecting zombie peer ignored", peer, + if dsc.ctx.noisyLog: trace "Reconnecting zombie peer ignored", peer, nPeers, nWorkers=dsc.buddies.len, maxWorkers, canRequeue=(now-ttz) return # Zombie can be removed from the database dsc.buddies.del peer.key - trace "Zombie peer timeout, ready for requeing", peer, + if dsc.ctx.noisyLog: trace "Zombie peer timeout, ready for requeing", peer, nPeers, nWorkers=dsc.buddies.len, maxWorkers # Initialise worker for this peer @@ -279,7 +378,7 @@ proc onPeerConnected[S,W](dsc: RunnerSyncRef[S,W]; peer: Peer) = ctrl: BuddyCtrlRef(), peer: peer)) if not buddy.worker.runStart(): - trace "Ignoring useless peer", peer, nPeers, + if dsc.ctx.noisyLog: trace "Ignoring useless peer", peer, nPeers, nWorkers=dsc.buddies.len, maxWorkers buddy.worker.ctrl.zombie = true return @@ -290,12 +389,12 @@ proc onPeerConnected[S,W](dsc: RunnerSyncRef[S,W]; peer: Peer) = # # In the past, one could not rely on the peer pool for having the number of # connections limited. - if dsc.ctx.buddiesMax <= dsc.buddies.len: + if dsc.buddiesMax <= dsc.buddies.len: let leastVal = dsc.buddies.shift.value # unqueue first/least item oldest = leastVal.data.worker if oldest.isNil: - trace "Dequeuing zombie peer", + if dsc.ctx.noisyLog: trace "Dequeuing zombie peer", # Fake `Peer` pretty print for `oldest` oldest=("Node[" & $leastVal.key.address & "]"), since=leastVal.data.zombified, nPeers, nWorkers=dsc.buddies.len, @@ -304,16 +403,13 @@ proc onPeerConnected[S,W](dsc: RunnerSyncRef[S,W]; peer: Peer) = else: # This could happen if there are idle entries in the table, i.e. # somehow hanging runners. - trace "Peer table full! Dequeuing least used entry", oldest, - nPeers, nWorkers=dsc.buddies.len, maxWorkers + if dsc.ctx.noisyLog: trace "Peer table full! Dequeuing least used entry", + oldest, nPeers, nWorkers=dsc.buddies.len, maxWorkers # Setting to `zombie` will trigger the worker to terminate (if any.) oldest.ctrl.zombie = true # Add peer entry - discard dsc.buddies.lruAppend(peer.key, buddy, dsc.ctx.buddiesMax) - - trace "Running peer worker", peer, nPeers, - nWorkers=dsc.buddies.len, maxWorkers + discard dsc.buddies.lruAppend(peer.key, buddy, dsc.buddiesMax) asyncSpawn buddy.workerLoop() @@ -321,16 +417,16 @@ proc onPeerConnected[S,W](dsc: RunnerSyncRef[S,W]; peer: Peer) = proc onPeerDisconnected[S,W](dsc: RunnerSyncRef[S,W], peer: Peer) = let nPeers = dsc.pool.len - maxWorkers = dsc.ctx.buddiesMax + maxWorkers = dsc.buddiesMax nWorkers = dsc.buddies.len rc = dsc.buddies.eq peer.key if rc.isErr: - debug "Disconnected, unregistered peer", peer, nPeers, nWorkers, maxWorkers - discard + if dsc.ctx.noisyLog: debug "Disconnected, unregistered peer", peer, + nPeers, nWorkers, maxWorkers elif rc.value.worker.isNil: # Re-visiting zombie - trace "Ignore zombie", peer, nPeers, nWorkers, maxWorkers - discard + if dsc.ctx.noisyLog: trace "Ignore zombie", peer, + nPeers, nWorkers, maxWorkers elif rc.value.worker.ctrl.zombie: # Don't disconnect, leave them fall out of the LRU cache. The effect is, # that reconnecting might be blocked, for a while. For few peers cases, @@ -339,12 +435,11 @@ proc onPeerDisconnected[S,W](dsc: RunnerSyncRef[S,W], peer: Peer) = rc.value.worker = nil rc.value.dsc = nil rc.value.zombified = Moment.now() - trace "Disconnected, zombie", peer, nPeers, nWorkers, maxWorkers + if dsc.ctx.noisyLog: trace "Disconnected, zombie", peer, + nPeers, nWorkers, maxWorkers else: rc.value.worker.ctrl.stopped = true # in case it is hanging somewhere dsc.buddies.del peer.key - trace "Disconnected buddy", peer, nPeers, - nWorkers=dsc.buddies.len, maxWorkers # ------------------------------------------------------------------------------ # Public functions @@ -356,47 +451,45 @@ proc initSync*[S,W]( slots: int; ) = ## Constructor - # Leave one extra slot so that it can holds a *zombie* even if all slots # are full. The effect is that a re-connect on the latest zombie will be # rejected as long as its worker descriptor is registered. - dsc.ctx = CtxRef[S](buddiesMax: max(1, slots + 1)) - + dsc.buddiesMax = max(1, slots + 1) dsc.pool = node.peerPool - dsc.buddies.init(dsc.ctx.buddiesMax) + dsc.buddies.init(dsc.buddiesMax) + dsc.ctx = CtxRef[S]() + proc startSync*[S,W](dsc: RunnerSyncRef[S,W]): bool = ## Set up `PeerObserver` handlers and start syncing. mixin runSetup - # Initialise sub-systems - if dsc.ctx.runSetup(): - var po = PeerObserver( - onPeerConnected: - proc(p: Peer) {.gcsafe.} = + + if dsc.runCtrl == terminated: + # Initialise sub-systems + if dsc.ctx.runSetup(): + dsc.runCtrl = running + + var po = PeerObserver( + onPeerConnected: proc(p: Peer) {.gcsafe.} = dsc.onPeerConnected(p), - onPeerDisconnected: - proc(p: Peer) {.gcsafe.} = + onPeerDisconnected: proc(p: Peer) {.gcsafe.} = dsc.onPeerDisconnected(p)) - po.setProtocol eth - dsc.pool.addObserver(dsc, po) - if dsc.ctx.daemon: - asyncSpawn dsc.daemonLoop() - return true + po.setProtocol eth + dsc.pool.addObserver(dsc, po) + if dsc.ctx.daemon: + asyncSpawn dsc.daemonLoop() + return true + proc stopSync*[S,W](dsc: RunnerSyncRef[S,W]) = ## Stop syncing and free peer handlers . - mixin runRelease - dsc.pool.delObserver(dsc) + dsc.terminate() - # Gracefully shut down async services - dsc.shutdown = true - for buddy in dsc.buddies.nextValues: - buddy.worker.ctrl.stopped = true - dsc.ctx.daemon = false - # Final shutdown (note that some workers might still linger on) - dsc.ctx.runRelease() +proc isRunning*[S,W](dsc: RunnerSyncRef[S,W]): bool = + ## Check start/stop state + dsc.runCtrl == running # ------------------------------------------------------------------------------ # End diff --git a/nimbus/tracer.nim b/nimbus/tracer.nim index 0cc7aa1603..469ae79ffb 100644 --- a/nimbus/tracer.nim +++ b/nimbus/tracer.nim @@ -51,14 +51,6 @@ proc toJson*(receipts: seq[Receipt]): JsonNode {.gcsafe.} # Private helpers # ------------------------------------------------------------------------------ -template safeTracer(info: string; code: untyped) = - try: - code - except CatchableError as e: - raiseAssert info & " name=" & $e.name & " msg=" & e.msg - -# ------------------- - proc init( T: type CaptCtxRef; com: CommonRef; @@ -75,9 +67,9 @@ proc init( T: type CaptCtxRef; com: CommonRef; topHeader: Header; - ): T - {.raises: [CatchableError].} = - T.init(com, com.db.getBlockHeader(topHeader.parentHash).stateRoot) + ): T = + let header = com.db.getBlockHeader(topHeader.parentHash).expect("top header parent exists") + T.init(com, header.stateRoot) proc activate(cc: CaptCtxRef): CaptCtxRef {.discardable.} = ## Install/activate new context `cc.ctx`, old one in `cc.restore` @@ -113,10 +105,11 @@ proc toJson(receipt: Receipt): JsonNode = proc dumpReceiptsImpl( chainDB: CoreDbRef; header: Header; - ): JsonNode - {.raises: [CatchableError].} = + ): JsonNode = result = newJArray() - for receipt in chainDB.getReceipts(header.receiptsRoot): + let receiptList = chainDB.getReceipts(header.receiptsRoot). + expect("receipts exists") + for receipt in receiptList: result.add receipt.toJson # ------------------------------------------------------------------------------ @@ -160,8 +153,7 @@ proc traceTransactionImpl( transactions: openArray[Transaction]; txIndex: uint64; tracerFlags: set[TracerFlags] = {}; - ): JsonNode - {.raises: [CatchableError].}= + ): JsonNode = if header.txRoot == EMPTY_ROOT_HASH: return newJNull() @@ -197,14 +189,14 @@ proc traceTransactionImpl( before.captureAccount(stateDb, miner, minerName) stateDb.persist() stateDiff["beforeRoot"] = %(stateDb.getStateRoot().toHex) - discard com.db.ctx.getAccounts.stateRoot(updateOk=true) # lazy hashing! + discard com.db.ctx.getAccounts.getStateRoot() # lazy hashing! stateCtx = CaptCtxRef.init(com, stateDb.getStateRoot()) let rc = vmState.processTransaction(tx, sender, header) gasUsed = if rc.isOk: rc.value else: 0 if idx.uint64 == txIndex: - discard com.db.ctx.getAccounts.stateRoot(updateOk=true) # lazy hashing! + discard com.db.ctx.getAccounts.getStateRoot() # lazy hashing! after.captureAccount(stateDb, sender, senderName) after.captureAccount(stateDb, recipient, recipientName) after.captureAccount(stateDb, miner, minerName) @@ -240,13 +232,11 @@ proc dumpBlockStateImpl( com: CommonRef; blk: EthBlock; dumpState = false; - ): JsonNode - {.raises: [CatchableError].} = + ): JsonNode = template header: Header = blk.header let cc = activate CaptCtxRef.init(com, header) - parent = com.db.getBlockHeader(header.parentHash) # only need a stack dump when scanning for internal transaction address captureFlags = {DisableMemory, DisableStorage, EnableAccount} @@ -308,8 +298,7 @@ proc traceBlockImpl( com: CommonRef; blk: EthBlock; tracerFlags: set[TracerFlags] = {}; - ): JsonNode - {.raises: [CatchableError].} = + ): JsonNode = template header: Header = blk.header let @@ -344,8 +333,7 @@ proc traceTransactionsImpl( com: CommonRef; header: Header; transactions: openArray[Transaction]; - ): JsonNode - {.raises: [CatchableError].} = + ): JsonNode = result = newJArray() for i in 0 ..< transactions.len: result.add traceTransactionImpl( @@ -360,8 +348,7 @@ proc traceBlock*( blk: EthBlock; tracerFlags: set[TracerFlags] = {}; ): JsonNode = - "traceBlock".safeTracer: - result = com.traceBlockImpl(blk, tracerFlags) + com.traceBlockImpl(blk, tracerFlags) proc toJson*(receipts: seq[Receipt]): JsonNode = result = newJArray() @@ -375,8 +362,7 @@ proc dumpMemoryDB*(node: JsonNode, cpt: CoreDbCaptRef) = node["state"] = n proc dumpReceipts*(chainDB: CoreDbRef, header: Header): JsonNode = - "dumpReceipts".safeTracer: - result = chainDB.dumpReceiptsImpl header + chainDB.dumpReceiptsImpl header proc traceTransaction*( com: CommonRef; @@ -385,24 +371,21 @@ proc traceTransaction*( txIndex: uint64; tracerFlags: set[TracerFlags] = {}; ): JsonNode = - "traceTransaction".safeTracer: - result = com.traceTransactionImpl(header, txs, txIndex,tracerFlags) + com.traceTransactionImpl(header, txs, txIndex,tracerFlags) proc dumpBlockState*( com: CommonRef; blk: EthBlock; dumpState = false; ): JsonNode = - "dumpBlockState".safeTracer: - result = com.dumpBlockStateImpl(blk, dumpState) + com.dumpBlockStateImpl(blk, dumpState) proc traceTransactions*( com: CommonRef; header: Header; transactions: openArray[Transaction]; ): JsonNode = - "traceTransactions".safeTracer: - result = com.traceTransactionsImpl(header, transactions) + com.traceTransactionsImpl(header, transactions) # ------------------------------------------------------------------------------ # End diff --git a/nimbus/transaction/host_call_nested.nim b/nimbus/transaction/host_call_nested.nim index c343a3a648..c006555625 100644 --- a/nimbus/transaction/host_call_nested.nim +++ b/nimbus/transaction/host_call_nested.nim @@ -32,7 +32,7 @@ proc beforeExecCreateEvmcNested(host: TransactionHost, gas: GasInt m.gas, sender: m.sender.fromEvmc, value: m.value.fromEvmc, - data: @(makeOpenArray(m.inputData, m.inputSize.int)) + data: @(makeOpenArray(m.input_data, m.input_size.int)) ) return newComputation(host.vmState, false, childMsg, cast[ContractSalt](m.create2_salt)) @@ -68,7 +68,7 @@ proc beforeExecCallEvmcNested(host: TransactionHost, else: host.computation.msg.contractAddress, value: m.value.fromEvmc, - data: @(makeOpenArray(m.inputData, m.inputSize.int)), + data: @(makeOpenArray(m.input_data, m.input_size.int)), flags: m.flags, ) return newComputation(host.vmState, false, childMsg) diff --git a/nimbus/transaction/host_trace.nim b/nimbus/transaction/host_trace.nim index a090f61554..2a743e2a18 100644 --- a/nimbus/transaction/host_trace.nim +++ b/nimbus/transaction/host_trace.nim @@ -115,7 +115,7 @@ proc showEvmcArgsExpr(fn: NimNode, callName: string): auto = if (types[i].repr == "ptr byte" or types[i].repr == "ptr HostTopic") and (i < args.len-1 and types[i+1].repr == "HostSize"): skip = i+1 - arg = newPar(args[i], args[i+1]) + arg = newNimNode(nnkTupleConstr).add(args[i], args[i+1]) msgExpr = quote do: `msgExpr` & `argNameString` & $(`arg`) return (msgExpr, args) diff --git a/nimbus/utils/utils.nim b/nimbus/utils/utils.nim index 8f2b688799..456ba7bed7 100644 --- a/nimbus/utils/utils.nim +++ b/nimbus/utils/utils.nim @@ -28,20 +28,19 @@ template calcWithdrawalsRoot*(withdrawals: openArray[Withdrawal]): Root = template calcReceiptsRoot*(receipts: openArray[Receipt]): Root = orderedTrieRoot(receipts) -func calcRequestsHashInsertType*(requests: varargs[seq[byte]]): Hash32 = - var ctx: sha256 - ctx.init() - for i, data in requests: - ctx.update([i.byte]) # request type +func calcRequestsHash*(requests: varargs[seq[byte]]): Hash32 = + func calcHash(reqType: byte, data: openArray[byte]): Hash32 = + var ctx: sha256 + ctx.init() + ctx.update([reqType]) # request type ctx.update data - ctx.finish(result.data) - ctx.clear() + ctx.finish(result.data) + ctx.clear() -func calcRequestsHash*(requests: varargs[seq[byte]]): Hash32 = var ctx: sha256 ctx.init() for i, data in requests: - ctx.update data + ctx.update(calcHash(i.byte, data).data) ctx.finish(result.data) ctx.clear() diff --git a/nimbus/version.nim b/nimbus/version.nim index bc2aaccd3b..ea467f9274 100644 --- a/nimbus/version.nim +++ b/nimbus/version.nim @@ -7,8 +7,8 @@ # those terms. import - std/[strutils, os, sequtils], - stew/byteutils + std/[strformat, strutils, os, sequtils], + stew/byteutils, ./compile_info const sourcePath = currentSourcePath.rsplit({DirSep, AltSep}, 1)[0] @@ -59,6 +59,9 @@ const GitRevisionBytes* = hexToByteArray[4](GitRevision) - NimVersion* = "Nim version " & $NimMajor & "." & $NimMinor & "." & $NimPatch - FullVersionStr* = "v" & NimbusVersion & "-" & GitRevision + + ClientId* = &"{NimbusName}/{FullVersionStr}/{hostOS}-{hostCPU}/Nim-{NimVersion}/{VmName}" + + ShortClientId* = NimbusName & "/" & FullVersionStr + \ No newline at end of file diff --git a/nimbus_verified_proxy/libverifproxy/verifproxy.nim b/nimbus_verified_proxy/libverifproxy/verifproxy.nim index b30685b14f..ac67e61d79 100644 --- a/nimbus_verified_proxy/libverifproxy/verifproxy.nim +++ b/nimbus_verified_proxy/libverifproxy/verifproxy.nim @@ -6,7 +6,7 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. import - std/[atomics, json, os, strutils, net], + std/[atomics, json, strutils, net], ../nimbus_verified_proxy, ../nimbus_verified_proxy_conf diff --git a/nimbus_verified_proxy/nimbus_verified_proxy.nim b/nimbus_verified_proxy/nimbus_verified_proxy.nim index 6c2f3b4072..b4374e0822 100644 --- a/nimbus_verified_proxy/nimbus_verified_proxy.nim +++ b/nimbus_verified_proxy/nimbus_verified_proxy.nim @@ -21,7 +21,7 @@ import beacon_chain/spec/beaconstate, beacon_chain/spec/datatypes/[phase0, altair, bellatrix], beacon_chain/[light_client, nimbus_binary_common, version], - ../nimbus/rpc/[cors, server_api_helpers], + ../nimbus/rpc/[cors, rpc_utils], ../nimbus/beacon/payload_conv, ./rpc/rpc_eth_api, ./nimbus_verified_proxy_conf, @@ -147,7 +147,7 @@ proc run*( parentBeaconBlockRoot = Opt.none(Hash32), requestsHash = Opt.none(Hash32), ) - blockCache.add(populateBlockObject(blk.header.rlpHash, blk, true)) + blockCache.add(populateBlockObject(blk.header.rlpHash, blk, 0.u256, true)) except RlpError as exc: debug "Invalid block received", err = exc.msg diff --git a/run-kurtosis-check.sh b/run-kurtosis-check.sh index 2a7a442cd7..b262554ddd 100755 --- a/run-kurtosis-check.sh +++ b/run-kurtosis-check.sh @@ -87,7 +87,7 @@ sed -i "s/el_image: .*/el_image: $new_el_image/" assertoor.yaml sudo kurtosis run \ --enclave nimbus-localtestnet \ - github.com/ethpandaops/ethereum-package@4.3.0 \ + github.com/ethpandaops/ethereum-package \ --args-file assertoor.yaml enclave_dump=$(kurtosis enclave inspect nimbus-localtestnet) diff --git a/scripts/make_states.sh b/scripts/make_states.sh index b050063462..0e6e8feb47 100755 --- a/scripts/make_states.sh +++ b/scripts/make_states.sh @@ -29,7 +29,7 @@ mkdir -p "$DATA_DIR" while true; do - "$SCRIPT_DIR/../build/nimbus" import \ + "$SCRIPT_DIR/../build/nimbus_execution_client" import \ --data-dir:"${DATA_DIR}" \ --era1-dir:"${ERA_DIR}" \ --era-dir:"${ERA1_DIR}" \ diff --git a/tests/all_tests.nim b/tests/all_tests.nim index d2060c922e..30b2257abe 100644 --- a/tests/all_tests.nim +++ b/tests/all_tests.nim @@ -15,10 +15,10 @@ cliBuilder: ./test_evm_support, ./test_genesis, ./test_precompiles, + ./test_rpc, ./test_generalstate_json, ./test_tracer_json, #./test_persistblock_json, -- fails - #./test_rpc, -- fails ./test_filters, ./test_op_arith, ./test_op_bit, diff --git a/tests/customgenesis/mekong.json b/tests/customgenesis/mekong.json new file mode 100644 index 0000000000..eb874be4e5 --- /dev/null +++ b/tests/customgenesis/mekong.json @@ -0,0 +1,927 @@ +{ + "config": { + "chainId": 7078815900, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "mergeNetsplitBlock": 0, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true, + "shanghaiTime": 0, + "cancunTime": 0, + "depositContractAddress": "0x4242424242424242424242424242424242424242", + "pragueTime": 1730470704, + "osakaTime": 1768772016 + }, + "alloc": { + "0x0000000000000000000000000000000000000000": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000001": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000002": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000003": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000004": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000005": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000006": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000007": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000008": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000009": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000000f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000010": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000011": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000012": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000013": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000014": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000015": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000016": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000017": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000018": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000019": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000001f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000020": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000021": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000022": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000023": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000024": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000025": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000026": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000027": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000028": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000029": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000002f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000030": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000031": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000032": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000033": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000034": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000035": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000036": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000037": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000038": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000039": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000003f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000040": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000041": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000042": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000043": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000044": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000045": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000046": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000047": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000048": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000049": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000004f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000050": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000051": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000052": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000053": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000054": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000055": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000056": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000057": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000058": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000059": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000005f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000060": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000061": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000062": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000063": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000064": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000065": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000066": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000067": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000068": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000069": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000006f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000070": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000071": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000072": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000073": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000074": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000075": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000076": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000077": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000078": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000079": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000007f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000080": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000081": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000082": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000083": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000084": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000085": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000086": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000087": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000088": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000089": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000008f": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000090": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000091": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000092": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000093": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000094": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000095": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000096": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000097": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000098": { + "balance": "1" + }, + "0x0000000000000000000000000000000000000099": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009a": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009b": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009c": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009d": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009e": { + "balance": "1" + }, + "0x000000000000000000000000000000000000009f": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000a9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000aa": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ab": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ac": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ad": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ae": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000af": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000b9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ba": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000be": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000bf": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000c9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ca": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ce": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000cf": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000d9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000da": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000db": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000dc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000dd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000de": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000df": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000e9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ea": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000eb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ec": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ed": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ee": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ef": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f0": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f1": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f2": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f3": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f4": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f5": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f6": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f7": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f8": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000f9": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fa": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fb": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fc": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fd": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000fe": { + "balance": "1" + }, + "0x00000000000000000000000000000000000000ff": { + "balance": "1" + }, + "0x4242424242424242424242424242424242424242": { + "balance": "0", + "code": "0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b60408051600880825281830190925260609160208201818036833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a2646970667358221220dceca8706b29e917dacf25fceef95acac8d90d765ac926663ce4096195952b6164736f6c634300060b0033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", + "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", + "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", + "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", + "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", + "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", + "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", + "0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", + "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", + "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", + "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", + "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", + "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", + "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", + "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", + "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", + "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", + "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", + "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" + } + }, + "0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02": { + "balance": "0", + "nonce": "1", + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500" + }, + "0x0aae40965e6800cd9b1f4b05ff21581047e3f91e": { + "balance": "0", + "nonce": "1", + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460575767ffffffffffffffff5f3511605357600143035f3511604b575f35612000014311604b57611fff5f3516545f5260205ff35b5f5f5260205ff35b5f5ffd5b5f35611fff60014303165500" + }, + "0x09Fc772D0857550724b07B850a4323f39112aAaA": { + "balance": "0", + "nonce": "1", + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460c7573615156028575f545f5260205ff35b36603814156101f05760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff146101f057600182026001905f5b5f821115608057810190830284830290049160010191906065565b9093900434106101f057600154600101600155600354806003026004013381556001015f35815560010160203590553360601b5f5260385f601437604c5fa0600101600355005b6003546002548082038060101160db575060105b5f5b81811461017f5780604c02838201600302600401805490600101805490600101549160601b83528260140152807fffffffffffffffffffffffffffffffff0000000000000000000000000000000016826034015260401c906044018160381c81600701538160301c81600601538160281c81600501538160201c81600401538160181c81600301538160101c81600201538160081c81600101535360010160dd565b9101809214610191579060025561019c565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14156101c957505f5b6001546002828201116101de5750505f6101e4565b01600290035b5f555f600155604c025ff35b5f5ffd", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + }, + "0x01aBEa29659e5e97C95107F20bb753cD3e09bBBb": { + "balance": "0", + "nonce": "1", + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460cf573615156028575f545f5260205ff35b366060141561019a5760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1461019a57600182026001905f5b5f821115608057810190830284830290049160010191906065565b90939004341061019a57600154600101600155600354806004026004013381556001015f358155600101602035815560010160403590553360601b5f5260605f60143760745fa0600101600355005b6003546002548082038060011160e3575060015b5f5b8181146101295780607402838201600402600401805490600101805490600101805490600101549260601b84529083601401528260340152906054015260010160e5565b910180921461013b5790600255610146565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff141561017357505f5b6001546001828201116101885750505f61018e565b01600190035b5f555f6001556074025ff35b5f5ffd", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + }, + "0x454b0EA7d8aD3C56D0CF2e44Ed97b2Feab4D7AF2": { + "balance": "1000000000000000000000000000" + }, + "0xd3248BA3E5492D767F8e427Cb9C7B9D5C3972D7B": { + "balance": "1000000000000000000000000000" + }, + "0xAD01b55d7c3448B8899862eb335FBb17075d8DE2": { + "balance": "1000000000000000000000000000" + }, + "0x7e454a14B8e7528465eeF86f0DC1da4f235d9D79": { + "balance": "1000000000000000000000000000" + }, + "0x7a40026A3b9A41754a95EeC8c92C6B99886f440C": { + "balance": "1000000000000000000000000000" + }, + "0x8c4D8CDD1f474510Dd70D66F2785a3a38a29AC1A": { + "balance": "1000000000000000000000000000" + }, + "0xfC7360b3b28cf4204268A8354dbEc60720d155D2": { + "balance": "1000000000000000000000000000" + }, + "0x2F7626bBDb8c0f9071bC98046Ef6fDed2167F97F": { + "balance": "1000000000000000000000000000" + }, + "0x752CE31Dec0dde7D1563CdF6438d892De2D4FBee": { + "balance": "1000000000000000000000000000" + }, + "0x455f42d91096c4Aa708D7Cbcb2DC499dE89C402c": { + "balance": "1000000000000000000000000000" + }, + "0x85154341488732D57a97F54AB9706Bc4B71B8636": { + "balance": "1000000000000000000000000000" + }, + "0x6a9CcA73d4Ff3a249fa778C7651f4Df8B9fFa0Df": { + "balance": "1000000000000000000000000000" + }, + "0xee2d0567AAe8080CA269b7908F4aF8BBb59A6804": { + "balance": "1000000000000000000000000000" + }, + "0xDd8D4027078a471816e4Ef7F69aFc0A5d2947dDc": { + "balance": "1000000000000000000000000000" + }, + "0x20466E9A67f299F6056bE52A50ea324FA6Bd05D5": { + "balance": "1000000000000000000000000000" + }, + "0x03F24BB0C9cfb30217Ff992A36ae9230F2A1697f": { + "balance": "1000000000000000000000000000" + }, + "0x032d8372C519c3927b87BDe4479E846a81EF2d10": { + "balance": "1000000000000000000000000000" + }, + "0xF863DF14954df73804b3150F3754a8F98CBB1D0d": { + "balance": "1000000000000000000000000000" + }, + "0xbe918A6aef1920F3706E23d153146aA6C5982620": { + "balance": "1000000000000000000000000000" + }, + "0xA0c7edA3CE474BC670A11EA9537cBEfd36331123": { + "balance": "1000000000000000000000000000" + }, + "0xF03b43BeB861044492Eb43E247bEE2AC6C80c651": { + "balance": "1000000000000000000000000000" + } + }, + "coinbase": "0x0000000000000000000000000000000000000000", + "difficulty": "0x01", + "extraData": "", + "gasLimit": "0x17d7840", + "nonce": "0x1234", + "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "1730372340" +} diff --git a/tests/engine_api/genesis_base_canonical.json b/tests/engine_api/genesis_base_canonical.json new file mode 100644 index 0000000000..8852c56aa0 --- /dev/null +++ b/tests/engine_api/genesis_base_canonical.json @@ -0,0 +1,23 @@ +{ + "payload": { + "parentHash": "0x13613aa073d32a70ebd3c9f952da2c0f956978b64c1b37c25641c6fecb024ade", + "feeRecipient": "0xf97e180c050e5ab072211ad2c213eb5aee4df134", + "stateRoot": "0xb132115a8ec822fa5316aafd8b7832017e902da9165371fbcd3697103113d50a", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "prevRandao": "0xbcbb53c61f6b8358614a7dcc9b3e00d953342e397c27280933aa9f24c7f13a65", + "blockNumber": "0x1", + "gasLimit": "0x17dd79d", + "gasUsed": "0x0", + "timestamp": "0x6724E329", + "extraData": "0xd883010e0c846765746888676f312e32332e32856c696e7578", + "baseFeePerGas": "0x342770c0", + "blockHash": "0xb1f5a0430be7376caf86a606101c2818d14f419326eec9b8ffa30e9209e148d5", + "transactions": [], + "withdrawals": [], + "blobGasUsed": "0x0", + "excessBlobGas": "0x0" + }, + "expectedBlobVersionedHashes": [], + "parentBeaconBlockRoot": "0x8b21a0a0256067676ec4b20d4118ed5bde170d78b4e9a8db1f7b7640d3b8b7c0" +} \ No newline at end of file diff --git a/tests/engine_api/newPayloadV4_invalid_blockhash.json b/tests/engine_api/newPayloadV4_invalid_blockhash.json new file mode 100644 index 0000000000..5a0d3d866a --- /dev/null +++ b/tests/engine_api/newPayloadV4_invalid_blockhash.json @@ -0,0 +1,42 @@ +{ + "payload": { + "baseFeePerGas": "0x7", + "blobGasUsed": "0x40000", + "blockHash": "0x187307d7dc9beb87af2a1d8340e9f17a3bbe4738963daeaf6d8e13b27b2d6a7f", + "blockNumber": "0x94b2", + "excessBlobGas": "0x0", + "extraData": "0xd883010e0c846765746888676f312e32332e32856c696e7578", + "feeRecipient": "0xf97e180c050e5ab072211ad2c213eb5aee4df134", + "gasLimit": "0x1c9c380", + "gasUsed": "0x33450", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "parentHash": "0xb647cb4bb1671b0c619c4c71b40effc80ce96819cb2832b52be6cda03d0e5fee", + "prevRandao": "0x7054943af1e7e26984ce8b44069f82d2a8d43ba4ce8a6ca5332978032db9acd3", + "receiptsRoot": "0x86b0c83ced5201a87641678023531e90eddfacb8e3ce78256bf7417037d387b9", + "stateRoot": "0x4a7f23a99ac14da1af4f1d0ca0b740c8cda0753676f2cf079089a74dcb2d4f24", + "timestamp": "0x671b65c6", + "transactions": [ + "0xf86b8233c8858bb2c972bc82520894d3248ba3e5492d767f8e427cb9c7b9d5c3972d7b01808503479468b7a0db0d13a11dc81e1b9297eb0322f26f6ff3380d7c7ec3929ee839fb7e9961b2ffa02588e1e28146dfc70e475ed89720db3dd7deeb4bdf16d67d33246465ab9fbcc1", + "0xf86b8233c9858bb2c972bc82520894d3248ba3e5492d767f8e427cb9c7b9d5c3972d7b01808503479468b7a005ddb5765e447c0cfe7394804bc868b95ec89e3aad4ce75142b94964648dcbc8a032edba7aa886708e13418b27325de673dd992fe0fc16e2432201320c737a8ebc", + "0xf86b8233ca858bb2c972bc82520894d3248ba3e5492d767f8e427cb9c7b9d5c3972d7b01808503479468b7a058303cc9724abc56bf143ce355f26042fd19dec468783b7dec3144ee378a7870a00d00461b597d96144fed4134cef4399d61f490d7dba3552faf32a7d4ff651956", + "0xf86b8233cb858bb2c972bc82520894d3248ba3e5492d767f8e427cb9c7b9d5c3972d7b01808503479468b7a0687ca518efea103a78c902b6dec99bae14e3d7e7c8c669cf34731ee326d8aa26a0641d4c0f4e51c7031b5d127b109f2f0c3faec4f4af6666aee4ec28576aa989db", + "0xf86b823360858bb2c972bc82520894454b0ea7d8ad3c56d0cf2e44ed97b2feab4d7af201808503479468b8a0ba0ceedcb6ecf00c691f319aa3e290162aa6a7c1904d8d037ff673aa22022069a062d9a5866974973a83ef187e95ab73db1036deca8e0ba8da19399159d8e5aead", + "0xf86b823361858bb2c972bc82520894454b0ea7d8ad3c56d0cf2e44ed97b2feab4d7af201808503479468b8a0ddbf85460a81bd05f260e539565af01e92e9e6727d3d07263bd84b42663cd1cfa0058600ed2e85cd0ccc36519d474def27b04f56e3dd9d8a8bd91c66dda2de8fe9", + "0xf86b823362858bb2c972bc82520894454b0ea7d8ad3c56d0cf2e44ed97b2feab4d7af201808503479468b8a063d1f8b06a59c32d01cbb07e90c6efaedf5ae2d898b2e391293faba38552a784a01dcb02b8273e9db61cdf072ad170bb102bf26d467bb0082796c45dff44c62c2c", + "0xf86b823363858bb2c972bc82520894454b0ea7d8ad3c56d0cf2e44ed97b2feab4d7af201808503479468b7a01481bb0af1c56554c8ce345ba305fa4c80efcf66b9900f75cc2a0b3e76e6014ba04b019b2ed72d9f078dc10485b5948cafa284ac5d7ab15c47925d37ec4477a0ee", + "0x03f89a8501a3ca344a8212cb84773594008504a817c8008252089407e25e7883c6466d579c0df73bff083ce63084e88080c08504a817c800e1a0012b9d1f8cf08c0a807e226a130be45165e48cae7e7be5ee4d3079bc629c487980a0be40054b628bc6f8cdf110cb98064a6128584baea76b283c0a565f716f1a7f39a046566f55da48b57cac3b5acd0f1ee1a8cd07c9ed99aa8a85b8aa0fad02f26824", + "0x03f89a8501a3ca344a8212f584773594008504a817c80082520894a49dfd382668a2d7778fadd68da1bac21d3c78ac8080c08504a817c800e1a00139c61fb5022f854e37742230a76e524166c0f1b2d67a152d4f5a488b5159c101a0dd6b92a173426adbf4052dc6b92a895a14a0f8f80d4007dc2d9a3a9bee5675f8a03b4ed09b34720a911d5a89041f45d76ce817eb073b00b35ae23063963abb572c" + ], + "withdrawals": [] + }, + "expectedBlobVersionedHashes": [ + "0x012b9d1f8cf08c0a807e226a130be45165e48cae7e7be5ee4d3079bc629c4879", + "0x0139c61fb5022f854e37742230a76e524166c0f1b2d67a152d4f5a488b5159c1" + ], + "parentBeaconBlockRoot": "0x25b66ce21990ffcbbb6248952c6ef7939dbddfb90f6659a2bb43ff369f47c23b", + "executionRequests": [ + "0x", + "0x", + "0x" + ] +} diff --git a/tests/macro_assembler.nim b/tests/macro_assembler.nim index 240e697efd..30778c6426 100644 --- a/tests/macro_assembler.nim +++ b/tests/macro_assembler.nim @@ -377,7 +377,7 @@ proc verifyAsmResult(vmState: BaseVMState, boa: Assembler, asmResult: CallResult proc createSignedTx(payload: seq[byte], chainId: ChainId): Transaction = let privateKey = PrivateKey.fromHex("7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d")[] let unsignedTx = Transaction( - txType: TxEIP4844, + txType: TxEip4844, nonce: 0, gasPrice: 1.GasInt, gasLimit: 500_000_000.GasInt, diff --git a/tests/replay/pp_light.nim b/tests/replay/pp_light.nim index 17d9901a8d..8c85ac03b5 100644 --- a/tests/replay/pp_light.nim +++ b/tests/replay/pp_light.nim @@ -90,12 +90,12 @@ func pp*(elapsed: Duration): string = result = elapsed.ppMins elif 0 < times.inSeconds(elapsed): result = elapsed.ppSecs - elif 0 < times.inMilliSeconds(elapsed): + elif 0 < times.inMilliseconds(elapsed): result = elapsed.ppMs - elif 0 < times.inMicroSeconds(elapsed): + elif 0 < times.inMicroseconds(elapsed): result = elapsed.ppUs else: - result = $elapsed.inNanoSeconds & "ns" + result = $elapsed.inNanoseconds & "ns" except ValueError: result = $elapsed diff --git a/tests/replay/undump_blocks_gz.nim b/tests/replay/undump_blocks_gz.nim index 2ed17e2b09..d300e51f77 100644 --- a/tests/replay/undump_blocks_gz.nim +++ b/tests/replay/undump_blocks_gz.nim @@ -47,8 +47,8 @@ proc dumpBlocksBeginNl*(db: CoreDbRef; headers: openArray[Header]): string = if headers[0].number == 1'u64: let - h0 = db.getBlockHeader(0'u64) - b0 = db.getBlockBody(h0.blockHash) + h0 = db.getBlockHeader(0'u64).expect("header exists") + b0 = db.getBlockBody(h0.blockHash).expect("block body exists") result = "" & dumpBlocksBegin(@[h0]) & "\n" & dumpBlocksListNl(h0,b0) & diff --git a/tests/test_aristo.nim b/tests/test_aristo.nim index 3846fc410a..b3ef861fee 100644 --- a/tests/test_aristo.nim +++ b/tests/test_aristo.nim @@ -20,6 +20,7 @@ import ./replay/pp, ./test_aristo/test_blobify, ./test_aristo/test_merge_proof, + ./test_aristo/test_nibbles, ./test_aristo/test_portal_proof, ./test_aristo/test_compute, ./test_aristo/[ diff --git a/tests/test_aristo/test_blobify.nim b/tests/test_aristo/test_blobify.nim index 55eb39a27d..d812a1e1b2 100644 --- a/tests/test_aristo/test_blobify.nim +++ b/tests/test_aristo/test_blobify.nim @@ -15,7 +15,6 @@ import unittest2, ../../nimbus/db/aristo/aristo_blobify suite "Aristo blobify": test "VertexRef roundtrip": let - leafRawData = VertexRef(vType: Leaf, lData: LeafPayload(pType: RawData)) leafAccount = VertexRef(vType: Leaf, lData: LeafPayload(pType: AccountData)) leafStoData = VertexRef(vType: Leaf, lData: LeafPayload(pType: StoData, stoData: 42.u256)) @@ -64,9 +63,13 @@ suite "Aristo blobify": ], ) + key = HashKey.fromBytes(rlp.encode([10'u64]))[] + check: - deblobify(blobify(leafRawData), VertexRef)[] == leafRawData - deblobify(blobify(leafAccount), VertexRef)[] == leafAccount - deblobify(blobify(leafStoData), VertexRef)[] == leafStoData - deblobify(blobify(branch), VertexRef)[] == branch - deblobify(blobify(extension), VertexRef)[] == extension + deblobify(blobify(leafAccount, key), VertexRef)[] == leafAccount + deblobify(blobify(leafStoData, key), VertexRef)[] == leafStoData + deblobify(blobify(branch, key), VertexRef)[] == branch + deblobify(blobify(extension, key), VertexRef)[] == extension + + deblobify(blobify(branch, key), HashKey)[] == key + deblobify(blobify(extension, key), HashKey)[] == key diff --git a/tests/test_aristo/test_compute.nim b/tests/test_aristo/test_compute.nim index a1473f2f7e..338e303487 100644 --- a/tests/test_aristo/test_compute.nim +++ b/tests/test_aristo/test_compute.nim @@ -15,8 +15,13 @@ import stew/byteutils, unittest2, ../../nimbus/db/aristo/[ - aristo_check, aristo_compute, aristo_delete, aristo_get, aristo_merge, aristo_desc, - aristo_utils, aristo_serialise, aristo_init, aristo_tx/tx_stow, + aristo_check, + aristo_compute, + aristo_delete, + aristo_merge, + aristo_desc, + aristo_init, + aristo_tx/tx_stow, ] func x(s: string): seq[byte] = @@ -25,79 +30,48 @@ func k(s: string): HashKey = HashKey.fromBytes(s.x).value let samples = [ - # From InvalidBlocks/bc4895-withdrawals/twoIdenticalIndex.json - @[ - ( - "80".x, - "da808094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x, - hash32"27f166f1d7c789251299535cb176ba34116e44894476a7886fe5d73d9be5c973", - VOID_HASH_KEY, - ), - ( - "01".x, - "da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x, - hash32"81eac5f476f48feb289af40ee764015f6b49036760438ea45df90d5342b6ae61", - VOID_HASH_KEY, - ), - ( - "02".x, - "da018094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x, - hash32"463769ae507fcc6d6231c8888425191c5622f330fdd4b78a7b24c4521137b573", - VOID_HASH_KEY, - ), - ( - "03".x, - "da028094c94f5374fce5edbc8e2a8697c15331677e6ebf0b822710".x, - hash32"a95b9a7b58a6b3cb4001eb0be67951c5517141cb0183a255b5cae027a7b10b36", - VOID_HASH_KEY, - ), - ], - # Somew on-the-fly provided stuff @[ + # Create leaf node ( - "0000".x, - "0000".x, - hash32"69a4785bd4f5a1590e329138d4248b6f887fa37e41bfc510a55f21b44f98be61", - "c783200000820000".k, + hash32"0000000000000000000000000000000000000000000000000000000000000001", + AristoAccount(balance: 0.u256, codeHash: EMPTY_CODE_HASH), + hash32"69b5c560f84dde1ecb0584976f4ebbe78e34bb6f32410777309a8693424bb563", ), + # Overwrite existing leaf ( - "0000".x, - "0001".x, - hash32"910fa1155b667666abe7b4b1cb4864c1dc91c57c9528e1c5f5f9f95e003afece", - "c783200000820001".k, + hash32"0000000000000000000000000000000000000000000000000000000000000001", + AristoAccount(balance: 1.u256, codeHash: EMPTY_CODE_HASH), + hash32"5ce3c539427b494d97d1fc89080118370f173d29c7dec55a292e6c00a08c4465", ), + # Split leaf node with extension ( - "0001".x, - "0001".x, - hash32"d082d2bfe8586142d6f40df0245e56365043819e51e2c9799c660558eeea0db5", - "dd821000d9c420820001c420820001808080808080808080808080808080".k, + hash32"0000000000000000000000000000000000000000000000000000000000000002", + AristoAccount(balance: 1.u256, codeHash: EMPTY_CODE_HASH), + hash32"6f28eee5fe67fba78c5bb42cbf6303574c4139ad97631002e07466d2f98c0d35", ), ( - "0002".x, - "0000".x, - hash32"d56ea5154fbad18e0ff1eaeafa2310d0879b59adf189c12ff1b2701e54db07b2", - VOID_HASH_KEY, + hash32"0000000000000000000000000000000000000000000000000000000000000003", + AristoAccount(balance: 0.u256, codeHash: EMPTY_CODE_HASH), + hash32"5dacbc38677935c135b911e8c786444e4dc297db1f0c77775ce47ffb8ce81dca", ), + # Split extension ( - "0100".x, - "0100".x, - hash32"d1c0699fe7928a536e0183c6400ae34eb1174ce6b21f9d117b061385034743ad", - VOID_HASH_KEY, + hash32"0100000000000000000000000000000000000000000000000000000000000000", + AristoAccount(balance: 1.u256, codeHash: EMPTY_CODE_HASH), + hash32"57dd53adbbd1969204c0b3435df8c22e0aadadad50871ce7ab4d802b77da2dd3", ), ( - "0101".x, - "0101".x, - hash32"74ddb98cb56e2dd7e8fa090b9ce740c3de589b72403b20136af75fb6168b1d19", - VOID_HASH_KEY, + hash32"0100000000000000000000000000000000000000000000000000000000000001", + AristoAccount(balance: 2.u256, codeHash: EMPTY_CODE_HASH), + hash32"67ebbac82cc2a55e0758299f63b785fbd3d1f17197b99c78ffd79d73d3026827", ), ( - "0200".x, - "0200".x, - hash32"2e777f06ab2de1a460a8412b8691f10fdcb162077ab5cbb1865636668bcb6471", - VOID_HASH_KEY, + hash32"0200000000000000000000000000000000000000000000000000000000000000", + AristoAccount(balance: 3.u256, codeHash: EMPTY_CODE_HASH), + hash32"e7d6a8f7fb3e936eff91a5f62b96177817f2f45a105b729ab54819a99a353325", ), - ], + ] ] suite "Aristo compute": @@ -105,29 +79,24 @@ suite "Aristo compute": test "Add and delete entries " & $n: let db = AristoDbRef.init VoidBackendRef - root = VertexID(2) + root = VertexID(1) - for inx, (k, v, r, s) in sample: - checkpoint("k = " & k.toHex & ", v = " & v.toHex()) + for (k, v, r) in sample: + checkpoint("k = " & k.toHex & ", v = " & $v) check: - db.mergeGenericData(root, k, v) == Result[bool, AristoError].ok(true) + db.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true) # Check state against expected value let w = db.computeKey((root, root)).expect("no errors") check r == w.to(Hash32) - # Check raw node if given, check nor ref against expected value - if s.isValid: - let z = db.getVtx((root, root)).toNode(root, db).value.digestTo(HashKey) - check s == z - let rc = db.check check rc == typeof(rc).ok() # Reverse run deleting entries - var deletedKeys: HashSet[seq[byte]] - for iny, (k, v, r, s) in sample.reversed: + var deletedKeys: HashSet[Hash32] + for iny, (k, v, r) in sample.reversed: # Check whether key was already deleted if k in deletedKeys: continue @@ -138,13 +107,8 @@ suite "Aristo compute": check r == w - # Check raw node if given, check nor ref against expected value - if s.isValid: - let z = db.getVtx((root, root)).toNode(root, db).value.digestTo(HashKey) - check s == z - check: - db.deleteGenericData(root, k).isOk + db.deleteAccountRecord(k).isOk let rc = db.check check rc == typeof(rc).ok() @@ -153,11 +117,15 @@ suite "Aristo compute": # TODO use mainnet genesis in this test? let db = AristoDbRef.init MemBackendRef - root = VertexID(2) + root = VertexID(1) - for inx, (k, v, r, s) in samples[^1]: + for (k, v, r) in samples[^1]: check: - db.mergeGenericData(root, keccak256(k).data, v) == Result[bool, AristoError].ok(true) + db.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true) check db.txStow(1, true).isOk() + check db.computeKeys(root).isOk() + + let w = db.computeKey((root, root)).value.to(Hash32) + check w == samples[^1][^1][2] diff --git a/tests/test_aristo/test_helpers.nim b/tests/test_aristo/test_helpers.nim index f1e8c6be07..c2a7e830a7 100644 --- a/tests/test_aristo/test_helpers.nim +++ b/tests/test_aristo/test_helpers.nim @@ -13,7 +13,7 @@ import eth/common, stew/endians2, ../../nimbus/db/aristo/[ - aristo_debug, aristo_desc, aristo_hike, aristo_layers, aristo_merge, + aristo_debug, aristo_desc, aristo_hike, aristo_layers, aristo_tx], ../replay/pp, "."/[undump_accounts, undump_desc, undump_storages, test_samples_xx] @@ -160,15 +160,16 @@ func to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T = let thisRoot = w.root if rootKey != thisRoot: (rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1)) - if 0 < w.data.accounts.len: - result.add ProofTrieData( - root: rootKey, - proof: cast[seq[seq[byte]]](w.data.proof), - kvpLst: w.data.accounts.mapIt(LeafTiePayload( - leafTie: LeafTie( - root: rootVid, - path: it.accKey.to(PathID)), - payload: LeafPayload(pType: RawData, rawBlob: it.accBlob)))) + # TODO rewrite as account leaves + # if 0 < w.data.accounts.len: + # result.add ProofTrieData( + # root: rootKey, + # proof: cast[seq[seq[byte]]](w.data.proof), + # kvpLst: w.data.accounts.mapIt(LeafTiePayload( + # leafTie: LeafTie( + # root: rootVid, + # path: it.accKey.to(PathID)), + # payload: LeafPayload(pType: RawData, rawBlob: it.accBlob)))) func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T = var (rootKey, rootVid) = (default(Hash32), VertexID(0)) @@ -177,15 +178,17 @@ func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T = let thisRoot = w.account.storageRoot if rootKey != thisRoot: (rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1)) - if 0 < w.data.len: - result.add ProofTrieData( - root: thisRoot, - id: n + 1, - kvpLst: w.data.mapIt(LeafTiePayload( - leafTie: LeafTie( - root: rootVid, - path: it.slotHash.to(PathID)), - payload: LeafPayload(pType: RawData, rawBlob: it.slotData)))) + # TODO rewrite as account leaves + + # if 0 < w.data.len: + # result.add ProofTrieData( + # root: thisRoot, + # id: n + 1, + # kvpLst: w.data.mapIt(LeafTiePayload( + # leafTie: LeafTie( + # root: rootVid, + # path: it.slotHash.to(PathID)), + # payload: LeafPayload(pType: RawData, rawBlob: it.slotData)))) if 0 < result.len: result[^1].proof = cast[seq[seq[byte]]](s.data.proof) @@ -217,14 +220,6 @@ proc schedStow*( # ------------------ -proc mergeGenericData*( - db: AristoDbRef; # Database, top layer - leaf: LeafTiePayload; # Leaf item to add to the database - ): Result[bool,AristoError] = - ## Variant of `mergeGenericData()`. - db.mergeGenericData( - leaf.leafTie.root, @(leaf.leafTie.path), leaf.payload.rawBlob) - proc mergeList*( db: AristoDbRef; # Database, top layer leafs: openArray[LeafTiePayload]; # Leaf items to add to the database @@ -235,17 +230,18 @@ proc mergeList*( for n,w in leafs: noisy.say "*** mergeList", " n=", n, "/", leafs.len - let rc = db.mergeGenericData w - noisy.say "*** mergeList", - " n=", n, "/", leafs.len, - " rc=", (if rc.isOk: "ok" else: $rc.error), - "\n -------------\n" - if rc.isErr: - return (n,dups,rc.error) - elif rc.value: - merged.inc - else: - dups.inc + # TODO refactor to not use generic data + # let rc = db.mergeGenericData w + # noisy.say "*** mergeList", + # " n=", n, "/", leafs.len, + # " rc=", (if rc.isOk: "ok" else: $rc.error), + # "\n -------------\n" + # if rc.isErr: + # return (n,dups,rc.error) + # elif rc.value: + # merged.inc + # else: + # dups.inc (merged, dups, AristoError(0)) diff --git a/tests/test_aristo/test_merge_proof.nim b/tests/test_aristo/test_merge_proof.nim index 999ea8488f..399a746e1a 100644 --- a/tests/test_aristo/test_merge_proof.nim +++ b/tests/test_aristo/test_merge_proof.nim @@ -93,58 +93,59 @@ proc testMergeProofAndKvpList*( list: openArray[ProofTrieData]; rdbPath: string; # Rocks DB storage directory idPfx = ""; - ): bool = - var - ps = PartStateRef(nil) - tx = AristoTxRef(nil) - rootKey: Hash32 - defer: - if not ps.isNil: - ps.db.finish(eradicate=true) - - for n,w in list: - - # Start new database upon request - if w.root != rootKey or w.proof.len == 0: - ps.innerCleanUp() - let db = block: - # New DB with disabled filter slots management - if 0 < rdbPath.len: - let (dbOpts, cfOpts) = DbOptions.init().toRocksDb() - let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, []) - xCheckRc rc.error == 0 - rc.value()[0] - else: - AristoDbRef.init(MemBackendRef) - ps = PartStateRef.init(db) - - # Start transaction (double frame for testing) - tx = ps.db.txBegin().value.to(AristoDbRef).txBegin().value - xCheck tx.isTop() - - # Update root - rootKey = w.root - - if 0 < w.proof.len: - let rc = ps.partPut(w.proof, ForceGenericPayload) - xCheckRc rc.error == 0 - - block: - let rc = ps.check() - xCheckRc rc.error == (0,0) - - for ltp in w.kvpLst: - block: - let rc = ps.partMergeGenericData( - testRootVid, @(ltp.leafTie.path), ltp.payload.rawBlob) - xCheckRc rc.error == 0 - block: - let rc = ps.check() - xCheckRc rc.error == (0,0) - - block: - let saveBeOk = tx.saveToBackend(noisy=noisy, debugID=n) - xCheck saveBeOk + ): bool {.deprecated.} = + # TODO update for non-generic data + # var + # ps = PartStateRef(nil) + # tx = AristoTxRef(nil) + # rootKey: Hash32 + # defer: + # if not ps.isNil: + # ps.db.finish(eradicate=true) + + # for n,w in list: + + # # Start new database upon request + # if w.root != rootKey or w.proof.len == 0: + # ps.innerCleanUp() + # let db = block: + # # New DB with disabled filter slots management + # if 0 < rdbPath.len: + # let (dbOpts, cfOpts) = DbOptions.init().toRocksDb() + # let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, []) + # xCheckRc rc.error == 0 + # rc.value()[0] + # else: + # AristoDbRef.init(MemBackendRef) + # ps = PartStateRef.init(db) + + # # Start transaction (double frame for testing) + # tx = ps.db.txBegin().value.to(AristoDbRef).txBegin().value + # xCheck tx.isTop() + + # # Update root + # rootKey = w.root + + # if 0 < w.proof.len: + # let rc = ps.partPut(w.proof, ForceGenericPayload) + # xCheckRc rc.error == 0 + + # block: + # let rc = ps.check() + # xCheckRc rc.error == (0,0) + + # for ltp in w.kvpLst: + # block: + # let rc = ps.partMergeGenericData( + # testRootVid, @(ltp.leafTie.path), ltp.payload.rawBlob) + # xCheckRc rc.error == 0 + # block: + # let rc = ps.check() + # xCheckRc rc.error == (0,0) + + # block: + # let saveBeOk = tx.saveToBackend(noisy=noisy, debugID=n) + # xCheck saveBeOk true diff --git a/tests/test_aristo/test_nibbles.nim b/tests/test_aristo/test_nibbles.nim new file mode 100644 index 0000000000..8fcaffe8a0 --- /dev/null +++ b/tests/test_aristo/test_nibbles.nim @@ -0,0 +1,86 @@ +# Nimbus +# Copyright (c) 2024 Status Research & Development GmbH +# Licensed under either of +# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or +# http://www.apache.org/licenses/LICENSE-2.0) +# * MIT license ([LICENSE-MIT](LICENSE-MIT) or +# http://opensource.org/licenses/MIT) +# at your option. This file may not be copied, modified, or +# distributed except according to those terms. + +{.used.} + +import + std/sequtils, + stew/byteutils, + unittest2, + ../../nimbus/db/aristo/aristo_desc/desc_nibbles + +suite "Nibbles": + test "trivial cases": + block: + let n = NibblesBuf.fromBytes([]) + check: + n.len == 0 + block: + let n = NibblesBuf.fromBytes([byte 0x10]) + check: + n.len == 2 + n[0] == 1 + n[1] == 0 + $n.slice(1) == "0" + $n.slice(2) == "" + + block: + let n = NibblesBuf.fromBytes(repeat(byte 0x12, 32)) + check: + n.len == 64 + n[0] == 1 + n[63] == 2 + + block: + let n = NibblesBuf.fromBytes(repeat(byte 0x12, 33)) + check: + n.len == 64 + n[0] == 1 + n[63] == 2 + + test "to/from hex encoding": + block: + let n = NibblesBuf.fromBytes([byte 0x12, 0x34, 0x56]) + + let + he = n.toHexPrefix(true) + ho = n.slice(1).toHexPrefix(true) + + check: + NibblesBuf.fromHexPrefix(he.data()) == (true, n) + NibblesBuf.fromHexPrefix(ho.data()) == (true, n.slice(1)) + block: + let n = NibblesBuf.fromBytes(repeat(byte 0x12, 32)) + + let + he = n.toHexPrefix(true) + ho = n.slice(1).toHexPrefix(true) + + check: + NibblesBuf.fromHexPrefix(he.data()) == (true, n) + NibblesBuf.fromHexPrefix(ho.data()) == (true, n.slice(1)) + + NibblesBuf.fromHexPrefix(@(he.data()) & @[byte 1]) == (true, n) + NibblesBuf.fromHexPrefix(@(ho.data()) & @[byte 1]) == (true, n.slice(1)) + + test "long": + let n = NibblesBuf.fromBytes( + hexToSeqByte("0100000000000000000000000000000000000000000000000000000000000000") + ) + + check $n == "0100000000000000000000000000000000000000000000000000000000000000" + check $n.slice(1) == "100000000000000000000000000000000000000000000000000000000000000" + + let + he = n.toHexPrefix(true) + ho = n.slice(1).toHexPrefix(true) + check: + NibblesBuf.fromHexPrefix(he.data()) == (true, n) + NibblesBuf.fromHexPrefix(ho.data()) == (true, n.slice(1)) diff --git a/tests/test_aristo/test_portal_proof.nim b/tests/test_aristo/test_portal_proof.nim index d2e0f3d936..020bf5b856 100644 --- a/tests/test_aristo/test_portal_proof.nim +++ b/tests/test_aristo/test_portal_proof.nim @@ -40,8 +40,9 @@ proc createPartDb(ps: PartStateRef; data: seq[seq[byte]]; info: static[string]) if rc.isErr: raiseAssert info & ": partPut => " & $rc.error # Save keys to database - for (rvid,key) in ps.vkPairs: - ps.db.layersPutKey(rvid, key) + # TODO support for partial databases + # for (rvid,key) in ps.vkPairs: + # ps.db.layersPutKey(rvid, key) # Make sure all is OK block: @@ -88,8 +89,6 @@ proc payloadAsBlob(pyl: LeafPayload; ps: PartStateRef): seq[byte] = ## const info = "payloadAsBlob" case pyl.pType: - of RawData: - pyl.rawBlob of AccountData: let key = block: if pyl.stoID.isValid: @@ -135,7 +134,10 @@ when false: # Private test functions # ------------------------------------------------------------------------------ -proc testCreatePortalProof(node: JsonNode, testStatusIMPL: var TestStatus) = +proc testCreatePortalProof(node: JsonNode, testStatusIMPL: var TestStatus) {.deprecated: "need to be rewritten to use non-generic data".} = + block: # TODO remove after rewrite + skip + return const info = "testCreateProofTwig" # Create partial database diff --git a/tests/test_aristo/test_tx.nim b/tests/test_aristo/test_tx.nim index fa5a7a1e31..a4c94663cb 100644 --- a/tests/test_aristo/test_tx.nim +++ b/tests/test_aristo/test_tx.nim @@ -247,106 +247,106 @@ proc testTxMergeAndDeleteOneByOne*( noisy: bool; list: openArray[ProofTrieData]; rdbPath: string; # Rocks DB storage directory - ): bool = - var - prng = PrngDesc.init 42 - db = AristoDbRef(nil) - fwdRevVfyToggle = true - defer: - if not db.isNil: - db.finish(eradicate=true) - - for n,w in list: - # Start with brand new persistent database. - db = block: - if 0 < rdbPath.len: - let (dbOpts, cfOpts) = DbOptions.init().toRocksDb() - let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, []) - xCheckRc rc.error == 0 - rc.value()[0] - else: - AristoDbRef.init(MemBackendRef) - - # Start transaction (double frame for testing) - xCheck db.txTop.isErr - var tx = db.txBegin().value.to(AristoDbRef).txBegin().value - xCheck tx.isTop() - xCheck tx.level == 2 - - # Reset database so that the next round has a clean setup - defer: db.innerCleanUp - - # Merge leaf data into main trie - let kvpLeafs = block: - var lst = w.kvpLst.mapRootVid testRootVid - # The list might be reduced for isolation of particular properties, - # e.g. lst.setLen(min(5,lst.len)) - lst - for i,leaf in kvpLeafs: - let rc = db.mergeGenericData leaf - xCheckRc rc.error == 0 - - # List of all leaf entries that should be on the database - var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet - - # Provide a (reproducible) peudo-random copy of the leafs list - let leafVidPairs = block: - let rc = db.randomisedLeafs(leafsLeft, prng) - xCheckRc rc.error == (0,0) - rc.value - - # Trigger subsequent saving tasks in loop below - let (saveMod, saveRest, relax) = block: - if leafVidPairs.len < 17: (7, 3, false) - elif leafVidPairs.len < 31: (11, 7, false) - else: (leafVidPairs.len div 5, 11, true) - - # === Loop over leafs === - for u,lvp in leafVidPairs: - let - runID = n + list.len * u - tailWalkVerify = 7 # + 999 - doSaveBeOk = ((u mod saveMod) == saveRest) - (leaf, lid) = lvp - - if doSaveBeOk: - let saveBeOk = tx.saveToBackend(relax=relax, noisy=noisy, runID) - xCheck saveBeOk: - noisy.say "***", "del1by1(2)", - " u=", u, - " n=", n, "/", list.len, - "\n db\n ", db.pp(backendOk=true), - "" - - # Delete leaf - block: - let rc = db.deleteGenericData(leaf.root, @(leaf.path)) - xCheckRc rc.error == 0 - - # Update list of remaininf leafs - leafsLeft.excl leaf - - let deletedVtx = tx.db.getVtx lid - xCheck deletedVtx.isValid == false: - noisy.say "***", "del1by1(8)" - - # Walking the database is too slow for large tables. So the hope is that - # potential errors will not go away and rather pop up later, as well. - if leafsLeft.len <= tailWalkVerify: - if u < leafVidPairs.len-1: - if fwdRevVfyToggle: - fwdRevVfyToggle = false - if not db.fwdWalkVerify(leaf.root, leafsLeft, noisy, runID): - return - else: - fwdRevVfyToggle = true - if not db.revWalkVerify(leaf.root, leafsLeft, noisy, runID): - return - - when true and false: - noisy.say "***", "del1by1(9)", - " n=", n, "/", list.len, - " nLeafs=", kvpLeafs.len + ): bool {.deprecated: "rewrite to use non-generic data".} = + # var + # prng = PrngDesc.init 42 + # db = AristoDbRef(nil) + # fwdRevVfyToggle = true + # defer: + # if not db.isNil: + # db.finish(eradicate=true) + + # for n,w in list: + # # Start with brand new persistent database. + # db = block: + # if 0 < rdbPath.len: + # let (dbOpts, cfOpts) = DbOptions.init().toRocksDb() + # let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, []) + # xCheckRc rc.error == 0 + # rc.value()[0] + # else: + # AristoDbRef.init(MemBackendRef) + + # # Start transaction (double frame for testing) + # xCheck db.txTop.isErr + # var tx = db.txBegin().value.to(AristoDbRef).txBegin().value + # xCheck tx.isTop() + # xCheck tx.level == 2 + + # # Reset database so that the next round has a clean setup + # defer: db.innerCleanUp + + # # Merge leaf data into main trie + # let kvpLeafs = block: + # var lst = w.kvpLst.mapRootVid testRootVid + # # The list might be reduced for isolation of particular properties, + # # e.g. lst.setLen(min(5,lst.len)) + # lst + # for i,leaf in kvpLeafs: + # let rc = db.mergeGenericData leaf + # xCheckRc rc.error == 0 + + # # List of all leaf entries that should be on the database + # var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet + + # # Provide a (reproducible) peudo-random copy of the leafs list + # let leafVidPairs = block: + # let rc = db.randomisedLeafs(leafsLeft, prng) + # xCheckRc rc.error == (0,0) + # rc.value + + # # Trigger subsequent saving tasks in loop below + # let (saveMod, saveRest, relax) = block: + # if leafVidPairs.len < 17: (7, 3, false) + # elif leafVidPairs.len < 31: (11, 7, false) + # else: (leafVidPairs.len div 5, 11, true) + + # # === Loop over leafs === + # for u,lvp in leafVidPairs: + # let + # runID = n + list.len * u + # tailWalkVerify = 7 # + 999 + # doSaveBeOk = ((u mod saveMod) == saveRest) + # (leaf, lid) = lvp + + # if doSaveBeOk: + # let saveBeOk = tx.saveToBackend(relax=relax, noisy=noisy, runID) + # xCheck saveBeOk: + # noisy.say "***", "del1by1(2)", + # " u=", u, + # " n=", n, "/", list.len, + # "\n db\n ", db.pp(backendOk=true), + # "" + + # # Delete leaf + # block: + # let rc = db.deleteGenericData(leaf.root, @(leaf.path)) + # xCheckRc rc.error == 0 + + # # Update list of remaininf leafs + # leafsLeft.excl leaf + + # let deletedVtx = tx.db.getVtx lid + # xCheck deletedVtx.isValid == false: + # noisy.say "***", "del1by1(8)" + + # # Walking the database is too slow for large tables. So the hope is that + # # potential errors will not go away and rather pop up later, as well. + # if leafsLeft.len <= tailWalkVerify: + # if u < leafVidPairs.len-1: + # if fwdRevVfyToggle: + # fwdRevVfyToggle = false + # if not db.fwdWalkVerify(leaf.root, leafsLeft, noisy, runID): + # return + # else: + # fwdRevVfyToggle = true + # if not db.revWalkVerify(leaf.root, leafsLeft, noisy, runID): + # return + + # when true and false: + # noisy.say "***", "del1by1(9)", + # " n=", n, "/", list.len, + # " nLeafs=", kvpLeafs.len true @@ -355,79 +355,79 @@ proc testTxMergeAndDeleteSubTree*( noisy: bool; list: openArray[ProofTrieData]; rdbPath: string; # Rocks DB storage directory - ): bool = - var - prng = PrngDesc.init 42 - db = AristoDbRef(nil) - defer: - if not db.isNil: - db.finish(eradicate=true) - - for n,w in list: - # Start with brand new persistent database. - db = block: - if 0 < rdbPath.len: - let (dbOpts, cfOpts) = DbOptions.init().toRocksDb() - let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, []) - xCheckRc rc.error == 0 - rc.value()[0] - else: - AristoDbRef.init(MemBackendRef) - - # Start transaction (double frame for testing) - xCheck db.txTop.isErr - var tx = db.txBegin().value.to(AristoDbRef).txBegin().value - xCheck tx.isTop() - xCheck tx.level == 2 - - # Reset database so that the next round has a clean setup - defer: db.innerCleanUp - - # Merge leaf data into main trie (w/vertex ID 2) - let kvpLeafs = block: - var lst = w.kvpLst.mapRootVid testRootVid - # The list might be reduced for isolation of particular properties, - # e.g. lst.setLen(min(5,lst.len)) - lst - for i,leaf in kvpLeafs: - let rc = db.mergeGenericData leaf - xCheckRc rc.error == 0 - - # List of all leaf entries that should be on the database - var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet - - # Provide a (reproducible) peudo-random copy of the leafs list - let leafVidPairs = block: - let rc = db.randomisedLeafs(leafsLeft, prng) - xCheckRc rc.error == (0,0) - rc.value - discard leafVidPairs - - # === delete sub-tree === - block: - let saveBeOk = tx.saveToBackend(relax=false, noisy=noisy, 1+list.len*n) - xCheck saveBeOk: - noisy.say "***", "del(1)", - " n=", n, "/", list.len, - "\n db\n ", db.pp(backendOk=true), - "" - # Delete sub-tree - block: - let rc = db.deleteGenericTree testRootVid - xCheckRc rc.error == 0: - noisy.say "***", "del(2)", - " n=", n, "/", list.len, - "\n db\n ", db.pp(backendOk=true), - "" - block: - let saveBeOk = tx.saveToBackend(relax=false, noisy=noisy, 2+list.len*n) - xCheck saveBeOk: - noisy.say "***", "del(3)", - " n=", n, "/", list.len, - "\n db\n ", db.pp(backendOk=true), - "" - when true and false: - noisy.say "***", "del(9) n=", n, "/", list.len, " nLeafs=", kvpLeafs.len + ): bool {.deprecated: "rewrite to use non-generic data".} = + # var + # prng = PrngDesc.init 42 + # db = AristoDbRef(nil) + # defer: + # if not db.isNil: + # db.finish(eradicate=true) + + # for n,w in list: + # # Start with brand new persistent database. + # db = block: + # if 0 < rdbPath.len: + # let (dbOpts, cfOpts) = DbOptions.init().toRocksDb() + # let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init(), dbOpts, cfOpts, []) + # xCheckRc rc.error == 0 + # rc.value()[0] + # else: + # AristoDbRef.init(MemBackendRef) + + # # Start transaction (double frame for testing) + # xCheck db.txTop.isErr + # var tx = db.txBegin().value.to(AristoDbRef).txBegin().value + # xCheck tx.isTop() + # xCheck tx.level == 2 + + # # Reset database so that the next round has a clean setup + # defer: db.innerCleanUp + + # # Merge leaf data into main trie (w/vertex ID 2) + # let kvpLeafs = block: + # var lst = w.kvpLst.mapRootVid testRootVid + # # The list might be reduced for isolation of particular properties, + # # e.g. lst.setLen(min(5,lst.len)) + # lst + # for i,leaf in kvpLeafs: + # let rc = db.mergeGenericData leaf + # xCheckRc rc.error == 0 + + # # List of all leaf entries that should be on the database + # var leafsLeft = kvpLeafs.mapIt(it.leafTie).toHashSet + + # # Provide a (reproducible) peudo-random copy of the leafs list + # let leafVidPairs = block: + # let rc = db.randomisedLeafs(leafsLeft, prng) + # xCheckRc rc.error == (0,0) + # rc.value + # discard leafVidPairs + + # # === delete sub-tree === + # block: + # let saveBeOk = tx.saveToBackend(relax=false, noisy=noisy, 1+list.len*n) + # xCheck saveBeOk: + # noisy.say "***", "del(1)", + # " n=", n, "/", list.len, + # "\n db\n ", db.pp(backendOk=true), + # "" + # # Delete sub-tree + # block: + # let rc = db.deleteGenericTree testRootVid + # xCheckRc rc.error == 0: + # noisy.say "***", "del(2)", + # " n=", n, "/", list.len, + # "\n db\n ", db.pp(backendOk=true), + # "" + # block: + # let saveBeOk = tx.saveToBackend(relax=false, noisy=noisy, 2+list.len*n) + # xCheck saveBeOk: + # noisy.say "***", "del(3)", + # " n=", n, "/", list.len, + # "\n db\n ", db.pp(backendOk=true), + # "" + # when true and false: + # noisy.say "***", "del(9) n=", n, "/", list.len, " nLeafs=", kvpLeafs.len true diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim index c61ed8b9e9..d2be00399e 100644 --- a/tests/test_blockchain_json.nim +++ b/tests/test_blockchain_json.nim @@ -59,7 +59,7 @@ proc parseEnv(node: JsonNode): TestEnv = result.pre = node["pre"] proc rootExists(db: CoreDbRef; root: Hash32): bool = - let state = db.ctx.getAccounts().stateRoot(updateOk=true).valueOr: + let state = db.ctx.getAccounts().getStateRoot().valueOr: return false state == root @@ -74,12 +74,13 @@ proc executeCase(node: JsonNode): bool = setupStateDB(env.pre, stateDB) stateDB.persist() - if not com.db.persistHeader(env.genesisHeader, - com.proofOfStake(env.genesisHeader)): - debugEcho "Failed to put genesis header into database" + com.db.persistHeader(env.genesisHeader, + com.proofOfStake(env.genesisHeader)).isOkOr: + debugEcho "Failed to put genesis header into database: ", error return false - if com.db.getCanonicalHead().blockHash != env.genesisHeader.blockHash: + let chead = com.db.getCanonicalHead().expect("canonicalHead exists") + if chead.blockHash != env.genesisHeader.blockHash: debugEcho "Genesis block hash in database is different with expected genesis block hash" return false @@ -102,7 +103,7 @@ proc executeCase(node: JsonNode): bool = debugEcho error return false - let head = com.db.getCanonicalHead() + let head = com.db.getCanonicalHead().expect("canonicalHead exists") let headHash = head.blockHash if headHash != env.lastBlockHash: debugEcho "lastestBlockHash mismatch, get: ", headHash, diff --git a/tests/test_configuration.nim b/tests/test_configuration.nim index aed783add6..3a6966bcee 100644 --- a/tests/test_configuration.nim +++ b/tests/test_configuration.nim @@ -128,15 +128,6 @@ proc configurationMain*() = let cx = cc.getWsFlags() check { RpcFlag.Eth, RpcFlag.Debug } == cx - test "protocols": - let conf = makeTestConfig() - let flags = conf.getProtocolFlags() - check ProtocolFlag.Eth in flags - - let bb = makeConfig(@["--protocols:eth"]) - let bx = bb.getProtocolFlags() - check ProtocolFlag.Eth in bx - test "bootstrap-node and bootstrap-file": let conf = makeTestConfig() let bootnodes = conf.getBootNodes() diff --git a/tests/test_coredb.nim.cfg b/tests/test_coredb.nim.cfg new file mode 100644 index 0000000000..1832a54a36 --- /dev/null +++ b/tests/test_coredb.nim.cfg @@ -0,0 +1 @@ +-d:unittest2DisableParamFiltering \ No newline at end of file diff --git a/tests/test_coredb/test_chainsync.nim b/tests/test_coredb/test_chainsync.nim index 02d1124034..48ee9e21e3 100644 --- a/tests/test_coredb/test_chainsync.nim +++ b/tests/test_coredb/test_chainsync.nim @@ -19,8 +19,7 @@ import ../replay/[pp, undump_blocks, undump_blocks_era1, xcheck], ./test_helpers -when CoreDbEnableProfiling or - LedgerEnableApiProfiling: +when CoreDbEnableProfiling: import std/sequtils @@ -33,12 +32,6 @@ when CoreDbEnableProfiling: kvtProfData: KvtDbProfListRef cdbProfData: CoreDbProfListRef -when LedgerEnableApiProfiling: - import - ../../nimbus/db/ledger/base/base_helpers - var - ldgProfData: LedgerProfListRef - const EnableExtraLoggingControl = true var @@ -125,11 +118,6 @@ proc test_chainSyncProfilingPrint*( else: "" discard info var blurb: seq[string] - when LedgerEnableApiProfiling: - blurb.add ldgProfData.profilingPrinter( - names = LedgerFnInx.toSeq.mapIt($it), - header = "Ledger profiling results" & info, - indent) when CoreDbEnableProfiling: blurb.add cdbProfData.profilingPrinter( names = CoreDbFnInx.toSeq.mapIt($it), @@ -192,8 +180,6 @@ proc test_chainSync*( aristoProfData = com.db.ariApi.AristoApiProfRef.data kvtProfData = com.db.kvtApi.KvtApiProfRef.data cdbProfData = com.db.profTab - when LedgerEnableApiProfiling: - ldgProfData = com.db.ldgProfData() # This will enable printing the `era1` covered block ranges (if any) undump_blocks_era1.noisy = noisy @@ -217,7 +203,7 @@ proc test_chainSync*( for w in files.undumpBlocks(least = start): let (fromBlock, toBlock) = (w[0].header.number, w[^1].header.number) if fromBlock == 0'u64: - xCheck w[0].header == com.db.getBlockHeader(0'u64) + xCheck w[0].header == com.db.getBlockHeader(0'u64).expect("block header exists") continue # Process groups of blocks ... diff --git a/tests/test_difficulty.nim b/tests/test_difficulty.nim index c34a49f501..9ea54667cf 100644 --- a/tests/test_difficulty.nim +++ b/tests/test_difficulty.nim @@ -20,11 +20,11 @@ import type Tester = object parentTimestamp: int64 - parentDifficulty: Uint256 + parentDifficulty: UInt256 parentUncles: Hash32 currentTimestamp: int64 currentBlockNumber: uint64 - currentDifficulty: Uint256 + currentDifficulty: UInt256 Tests = Table[string, Tester] @@ -37,11 +37,11 @@ proc hexOrInt64(data: JsonNode, key: string, hex: static[bool]): int64 = else: int64(parseInt data[key].getStr) -proc hexOrInt256(data: JsonNode, key: string, hex: static[bool]): Uint256 = +proc hexOrInt256(data: JsonNode, key: string, hex: static[bool]): UInt256 = when hex: UInt256.fromHex data[key].getStr else: - parse(data[key].getStr, Uint256) + parse(data[key].getStr, UInt256) proc parseHash(data: string): Hash32 = case data @@ -50,7 +50,7 @@ proc parseHash(data: string): Hash32 = else: doAssert(false, "invalid uncle hash") -proc parseTests(testData: JSonNode): Tests = +proc parseTests(testData: JsonNode): Tests = const hex = true result = Table[string, Tester]() var t: Tester @@ -115,7 +115,7 @@ template runTest() = for fname in filenames: let filename = fname - test fname.subStr(inputPath.len + 1): + test fname.substr(inputPath.len + 1): let fixtures = parseJson(readFile(filename)) testFixture(fixtures, testStatusIMPL) diff --git a/tests/test_engine_api.nim b/tests/test_engine_api.nim index 4d081f0ef5..b3c3c60108 100644 --- a/tests/test_engine_api.nim +++ b/tests/test_engine_api.nim @@ -14,6 +14,8 @@ import json_rpc/rpcclient, json_rpc/rpcserver, web3/engine_api, + web3/conversions, + web3/execution_types, unittest2 import @@ -26,16 +28,31 @@ import ../hive_integration/nodocker/engine/engine_client type - TestEnv* = ref object + TestEnv = ref object com : CommonRef server : RpcHttpServer client : RpcHttpClient chain : ForkedChainRef + NewPayloadV4Params* = object + payload*: ExecutionPayload + expectedBlobVersionedHashes*: Opt[seq[Hash32]] + parentBeaconBlockRoot*: Opt[Hash32] + executionRequests*: Opt[array[3, seq[byte]]] + + TestSpec = object + name: string + fork: HardFork + genesisFile: string + testProc: proc(env: TestEnv): Result[void, string] + +NewPayloadV4Params.useDefaultSerializationIn JrpcConv + const - genesisFile = "tests/customgenesis/engine_api_genesis.json" + defaultGenesisFile = "tests/customgenesis/engine_api_genesis.json" + mekongGenesisFile = "tests/customgenesis/mekong.json" -proc setupConfig(): NimbusConf = +proc setupConfig(genesisFile: string): NimbusConf = makeConfig(@[ "--custom-network:" & genesisFile, "--listen-address: 127.0.0.1", @@ -53,11 +70,12 @@ proc setupClient(port: Port): RpcHttpClient = waitFor client.connect("127.0.0.1", port, false) return client -proc setupEnv(envFork: HardFork = MergeFork): TestEnv = +proc setupEnv(envFork: HardFork = MergeFork, + genesisFile: string = defaultGenesisFile): TestEnv = doAssert(envFork >= MergeFork) let - conf = setupConfig() + conf = setupConfig(genesisFile) if envFork >= Shanghai: conf.networkParams.config.shanghaiTime = Opt.some(0.EthTime) @@ -70,8 +88,8 @@ proc setupEnv(envFork: HardFork = MergeFork): TestEnv = let com = setupCom(conf) - head = com.db.getCanonicalHead() - chain = newForkedChain(com, head) + head = com.db.getCanonicalHead().expect("canonical head exists") + chain = ForkedChainRef.init(com) txPool = TxPoolRef.new(com) # txPool must be informed of active head @@ -85,7 +103,7 @@ proc setupEnv(envFork: HardFork = MergeFork): TestEnv = beaconEngine = BeaconEngineRef.new(txPool, chain) serverApi = newServerAPI(chain, txPool) - setupServerAPI(serverApi, server) + setupServerAPI(serverApi, server, newEthContext()) setupEngineAPI(beaconEngine, server) server.start() @@ -119,8 +137,8 @@ proc runBasicCycleTest(env: TestEnv): Result[void, string] = withdrawals: Opt.some(newSeq[WithdrawalV1]()), ) fcuRes = ? client.forkchoiceUpdated(Version.V1, update, Opt.some(attr)) - payload = ? client.getPayload(fcuRes.payloadId.get, Version.V1) - npRes = ? client.newPayload(Version.V1, payload.executionPayload) + payload = ? client.getPayload(Version.V1, fcuRes.payloadId.get) + npRes = ? client.newPayloadV1(payload.executionPayload) discard ? client.forkchoiceUpdated(Version.V1, ForkchoiceStateV1( headBlockHash: npRes.latestValidHash.get @@ -148,10 +166,10 @@ proc runNewPayloadV4Test(env: TestEnv): Result[void, string] = parentBeaconBlockRoot: Opt.some(default(Hash32)) ) fcuRes = ? client.forkchoiceUpdated(Version.V3, update, Opt.some(attr)) - payload = ? client.getPayload(fcuRes.payloadId.get, Version.V4) - res = ? client.newPayload(Version.V4, - payload.executionPayload, - Opt.some(default(Hash32)), + payload = ? client.getPayload(Version.V4, fcuRes.payloadId.get) + res = ? client.newPayloadV4(payload.executionPayload, + Opt.some(default(seq[Hash32])), + attr.parentBeaconBlockRoot, payload.executionRequests) if res.status != PayloadExecutionStatus.valid: @@ -166,23 +184,99 @@ proc runNewPayloadV4Test(env: TestEnv): Result[void, string] = ok() +proc newPayloadV4ParamsTest(env: TestEnv): Result[void, string] = + const + paramsFile = "tests/engine_api/newPayloadV4_invalid_blockhash.json" + + let + client = env.client + params = JrpcConv.loadFile(paramsFile, NewPayloadV4Params) + res = ? client.newPayloadV4( + params.payload, + params.expectedBlobVersionedHashes, + params.parentBeaconBlockRoot, + params.executionRequests) + + if res.status != PayloadExecutionStatus.syncing: + return err("res.status should equals to PayloadExecutionStatus.syncing") + + if res.latestValidHash.isSome: + return err("lastestValidHash should empty") + + if res.validationError.isSome: + return err("validationError should empty") + + ok() + +proc genesisShouldCanonicalTest(env: TestEnv): Result[void, string] = + const + paramsFile = "tests/engine_api/genesis_base_canonical.json" + + let + client = env.client + params = JrpcConv.loadFile(paramsFile, NewPayloadV4Params) + res = ? client.newPayloadV3( + params.payload, + params.expectedBlobVersionedHashes, + params.parentBeaconBlockRoot) + + if res.status != PayloadExecutionStatus.valid: + return err("res.status should equals to PayloadExecutionStatus.valid") + + if res.latestValidHash.isNone: + return err("lastestValidHash should not empty") + + let + update = ForkchoiceStateV1( + headBlockHash: params.payload.blockHash, + safeBlockHash: params.payload.parentHash, + finalizedBlockHash: params.payload.parentHash, + ) + fcuRes = ? client.forkchoiceUpdated(Version.V3, update) + + if fcuRes.payloadStatus.status != PayloadExecutionStatus.valid: + return err("fcuRes.payloadStatus.status should equals to PayloadExecutionStatus.valid") + + ok() + +const testList = [ + TestSpec( + name: "Basic cycle", + fork: MergeFork, + testProc: runBasicCycleTest + ), + TestSpec( + name: "newPayloadV4", + fork: Prague, + testProc: runNewPayloadV4Test + ), + TestSpec( + name: "newPayloadV4 params", + fork: Prague, + testProc: newPayloadV4ParamsTest + ), + TestSpec( + name: "Genesis block hash should canonical", + fork: Cancun, + testProc: genesisShouldCanonicalTest, + genesisFile: mekongGenesisFile + ), + ] + proc engineApiMain*() = suite "Engine API": - test "Basic cycle": - let env = setupEnv() - let res = env.runBasicCycleTest() - if res.isErr: - debugEcho "FAILED TO EXECUTE TEST: ", res.error - check res.isOk - env.close() - - test "newPayloadV4": - let env = setupEnv(Prague) - let res = env.runNewPayloadV4Test() - if res.isErr: - debugEcho "FAILED TO EXECUTE TEST: ", res.error - check res.isOk - env.close() + for z in testList: + test z.name: + let genesisFile = if z.genesisFile.len > 0: + z.genesisFile + else: + defaultGenesisFile + let env = setupEnv(z.fork, genesisFile) + let res = z.testProc(env) + if res.isErr: + debugEcho "FAILED TO EXECUTE ", z.name, ": ", res.error + check res.isOk + env.close() when isMainModule: engineApiMain() diff --git a/tests/test_forked_chain.nim b/tests/test_forked_chain.nim index e2c333a695..714a9584bd 100644 --- a/tests/test_forked_chain.nim +++ b/tests/test_forked_chain.nim @@ -88,14 +88,15 @@ proc makeBlk(com: CommonRef, number: BlockNumber, parentBlk: Block, extraData: b blk proc headHash(c: CommonRef): Hash32 = - c.db.getCanonicalHead().blockHash + c.db.getCanonicalHead().expect("canonical head exists").blockHash func blockHash(x: Block): Hash32 = x.header.blockHash proc wdWritten(com: CommonRef, blk: Block): int = if blk.header.withdrawalsRoot.isSome: - com.db.getWithdrawals(blk.header.withdrawalsRoot.get).len + com.db.getWithdrawals(blk.header.withdrawalsRoot.get). + expect("withdrawals exists").len else: 0 @@ -419,5 +420,21 @@ proc forkedChainMain*() = check chain.headerByNumber(5).expect("OK").number == 5 check chain.headerByNumber(5).expect("OK").blockHash == blk5.blockHash + test "Import after Replay Segment": + let com = env.newCom() + var chain = newForkedChain(com, com.genesisHeader, baseDistance = 3) + + check chain.importBlock(blk1).isOk + check chain.importBlock(blk2).isOk + check chain.importBlock(blk3).isOk + check chain.importBlock(blk4).isOk + check chain.importBlock(blk5).isOk + + chain.replaySegment(blk2.header.blockHash) + chain.replaySegment(blk5.header.blockHash) + + check chain.importBlock(blk6).isOk + check chain.importBlock(blk7).isOk + when isMainModule: forkedChainMain() diff --git a/tests/test_generalstate_json.nim b/tests/test_generalstate_json.nim index fcfb517120..0d7c75c64d 100644 --- a/tests/test_generalstate_json.nim +++ b/tests/test_generalstate_json.nim @@ -155,7 +155,7 @@ proc testFixture(fixtures: JsonNode, testStatusIMPL: var TestStatus, ctx.chainConfig = getChainConfig(forkName) except ValueError as ex: debugEcho ex.msg - testStatusIMPL = TestStatus.Failed + testStatusIMPL = TestStatus.FAILED return template runSubTest(subTest: JsonNode) = diff --git a/tests/test_genesis.nim b/tests/test_genesis.nim index 59df3a9ce2..ae2c281083 100644 --- a/tests/test_genesis.nim +++ b/tests/test_genesis.nim @@ -89,7 +89,7 @@ proc customGenesisTest() = let com = CommonRef.new(newCoreDbRef DefaultDbMemory, params = cg) let stateRoot = hash32"d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544" let genesisHash = hash32"d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" - let ttd = "46_089_003_871_917_200_000_000".parse(Uint256) + let ttd = "46_089_003_871_917_200_000_000".parse(UInt256) check com.genesisHeader.stateRoot == stateRoot check com.genesisHeader.blockHash == genesisHash check com.ttd.get == ttd @@ -102,7 +102,7 @@ proc customGenesisTest() = let com = CommonRef.new(newCoreDbRef DefaultDbMemory, params = cg) let stateRoot = hash32"d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544" let genesisHash = hash32"d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3" - let ttd = "46_089_003_871_917_200_000_000".parse(Uint256) + let ttd = "46_089_003_871_917_200_000_000".parse(UInt256) check com.genesisHeader.stateRoot == stateRoot check com.genesisHeader.blockHash == genesisHash check com.ttd.get == ttd diff --git a/tests/test_getproof_json.nim b/tests/test_getproof_json.nim index 22cd65c5bb..a6d6c8b6bf 100644 --- a/tests/test_getproof_json.nim +++ b/tests/test_getproof_json.nim @@ -15,7 +15,7 @@ import eth/[rlp, trie/trie_defs, trie/hexary_proof_verification], ../nimbus/db/[ledger, core_db], ../nimbus/common/chain_config, - ../nimbus/rpc/p2p + ../nimbus/rpc/server_api type Hash32 = eth_types.Hash32 diff --git a/tests/test_helpers.nim b/tests/test_helpers.nim index 084d087493..4fb3443acf 100644 --- a/tests/test_helpers.nim +++ b/tests/test_helpers.nim @@ -68,7 +68,7 @@ proc jsonTestImpl*(inputFolder, outputName: string, handler, skipTest: NimNode): doAssert(filenames.len > 0) for fname in filenames: let filename = fname - test fname.subStr(inputPath.len + 1): + test fname.substr(inputPath.len + 1): {.gcsafe.}: let (folder, name) = filename.splitPath() diff --git a/tests/test_ledger.nim b/tests/test_ledger.nim index 06586d7ded..40cc9e51aa 100644 --- a/tests/test_ledger.nim +++ b/tests/test_ledger.nim @@ -14,7 +14,6 @@ import stew/byteutils, stew/endians2, ../nimbus/config, - ../nimbus/db/ledger, ../nimbus/db/storage_types, ../nimbus/common/common, ../nimbus/core/chain, @@ -22,7 +21,7 @@ import ../nimbus/core/casper, ../nimbus/transaction, ../nimbus/constants, - ../nimbus/db/ledger/backend/accounts_ledger {.all.}, # import all private symbols + ../nimbus/db/ledger {.all.}, # import all private symbols unittest2 const @@ -339,8 +338,9 @@ proc runLedgerTransactionTests(noisy = true) = for tx in body.transactions: env.txs.add tx + let head = env.xdb.getCanonicalHead().expect("canonicalHead exists") test &"Collect unique recipient addresses from {env.txs.len} txs," & - &" head=#{env.xdb.getCanonicalHead.number}": + &" head=#{head.number}": # since we generate our own transactions instead of replaying # from testnet blocks, the recipients already unique. for n,tx in env.txs: @@ -348,7 +348,6 @@ proc runLedgerTransactionTests(noisy = true) = env.txi.add n test &"Run {env.txi.len} two-step trials with rollback": - let head = env.xdb.getCanonicalHead() for n in env.txi: let dbTx = env.xdb.ctx.newTransaction() defer: dbTx.dispose() @@ -356,7 +355,6 @@ proc runLedgerTransactionTests(noisy = true) = env.runTrial2ok(ledger, n) test &"Run {env.txi.len} three-step trials with rollback": - let head = env.xdb.getCanonicalHead() for n in env.txi: let dbTx = env.xdb.ctx.newTransaction() defer: dbTx.dispose() @@ -365,7 +363,6 @@ proc runLedgerTransactionTests(noisy = true) = test &"Run {env.txi.len} three-step trials with extra db frame rollback" & " throwing Exceptions": - let head = env.xdb.getCanonicalHead() for n in env.txi: let dbTx = env.xdb.ctx.newTransaction() defer: dbTx.dispose() @@ -373,7 +370,6 @@ proc runLedgerTransactionTests(noisy = true) = env.runTrial3Survive(ledger, n, noisy) test &"Run {env.txi.len} tree-step trials without rollback": - let head = env.xdb.getCanonicalHead() for n in env.txi: let dbTx = env.xdb.ctx.newTransaction() defer: dbTx.dispose() @@ -381,7 +377,6 @@ proc runLedgerTransactionTests(noisy = true) = env.runTrial3(ledger, n, rollback = false) test &"Run {env.txi.len} four-step trials with rollback and db frames": - let head = env.xdb.getCanonicalHead() for n in env.txi: let dbTx = env.xdb.ctx.newTransaction() defer: dbTx.dispose() diff --git a/tests/test_op_memory.nim b/tests/test_op_memory.nim index 692a940564..d8cbb9a60e 100644 --- a/tests/test_op_memory.nim +++ b/tests/test_op_memory.nim @@ -306,7 +306,7 @@ proc opMemoryMain*() = var body = newStmtList() var stack = newStmtList() - for x in countDown(i, 0): + for x in countdown(i, 0): let val = newLit("0x" & toHex(x+10, 2)) body.add quote do: `pushIdent` `val` diff --git a/tests/test_rpc.nim b/tests/test_rpc.nim index 14561b7006..4cadef1ea1 100644 --- a/tests/test_rpc.nim +++ b/tests/test_rpc.nim @@ -6,13 +6,14 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. import + chronicles, std/[json, os, typetraits, times, sequtils], asynctest, web3/eth_api, stew/byteutils, json_rpc/[rpcserver, rpcclient], - nimcrypto/[keccak, hash], - eth/[rlp, keys, trie/hexary_proof_verification], - eth/common/transaction_utils, + eth/[rlp, trie/hexary_proof_verification], + eth/common/[transaction_utils, addresses], + ../hive_integration/nodocker/engine/engine_client, ../nimbus/[constants, transaction, config, evm/state, evm/types, version], ../nimbus/db/[ledger, storage_types], ../nimbus/sync/protocol, @@ -26,11 +27,6 @@ import ./macro_assembler, ./test_block_fixture -const - zeroAddress = block: - var rc: Address - rc - type Hash32 = common.Hash32 Header = common.Header @@ -50,7 +46,7 @@ func emptyStorageHash(): Hash32 = proc verifyAccountProof(trustedStateRoot: Hash32, res: ProofResponse): MptProofVerificationResult = let - key = toSeq(keccak256(res.address).data) + key = toSeq(keccak256(res.address.data).data) value = rlp.encode(Account( nonce: res.nonce.uint64, balance: res.balance, @@ -78,18 +74,34 @@ proc persistFixtureBlock(chainDB: CoreDbRef) = let header = getBlockHeader4514995() # Manually inserting header to avoid any parent checks discard chainDB.ctx.getKvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header)) - chainDB.addBlockNumberToHashLookup(header) + chainDB.addBlockNumberToHashLookup(header.number, header.blockHash) chainDB.persistTransactions(header.number, header.txRoot, getBlockBody4514995().transactions) chainDB.persistReceipts(header.receiptsRoot, getReceipts4514995()) -proc setupEnv(com: CommonRef, signer, ks2: Address, ctx: EthContext): TestEnv = +proc setupClient(port: Port): RpcHttpClient = + let client = newRpcHttpClient() + waitFor client.connect("127.0.0.1", port, false) + return client + +proc close(client: RpcHttpClient, server: RpcHttpServer) = + waitFor client.close() + waitFor server.closeWait() + + +# NOTE : The setup of the environment should have been done through the +# `ForkedChainRef`, however the `ForkedChainRef` is does not persist blocks to the db +# unless the base distance is reached. This is not the case for the tests, so we +# have to manually persist the blocks to the db. +# Main goal of the tests to check the RPC calls, can serve data persisted in the db +# as data from memory blocks are easily tested via kurtosis or other tests +proc setupEnv(signer, ks2: Address, ctx: EthContext, com: CommonRef): TestEnv = var - parent = com.db.getCanonicalHead() acc = ctx.am.getAccount(signer).tryGet() - blockNumber = 1.BlockNumber + blockNumber = 1'u64 + parent = com.db.getCanonicalHead().expect("canonicalHead exists") parentHash = parent.blockHash - const code = evmByteCode: + let code = evmByteCode: Push4 "0xDEADBEEF" # PUSH Push1 "0x00" # MSTORE AT 0x00 Mstore @@ -99,28 +111,26 @@ proc setupEnv(com: CommonRef, signer, ks2: Address, ctx: EthContext): TestEnv = let vmHeader = Header(parentHash: parentHash, gasLimit: 5_000_000) - vmState = BaseVMState.new( - parent = Header(stateRoot: parent.stateRoot), - header = vmHeader, - com = com) + vmState = BaseVMState() + vmState.init(parent, vmHeader, com) vmState.stateDB.setCode(ks2, code) vmState.stateDB.addBalance( signer, 1.u256 * 1_000_000_000.u256 * 1_000_000_000.u256) # 1 ETH # Test data created for eth_getProof tests - let regularAcc = Hash32.fromHex("0x0000000000000000000000000000000000000001") + let regularAcc = Address.fromHex("0x0000000000000000000000000000000000000001") vmState.stateDB.addBalance(regularAcc, 2_000_000_000.u256) vmState.stateDB.setNonce(regularAcc, 1.uint64) - let contractAccWithStorage = Hash32.fromHex("0x0000000000000000000000000000000000000002") + let contractAccWithStorage = Address.fromHex("0x0000000000000000000000000000000000000002") vmState.stateDB.addBalance(contractAccWithStorage, 1_000_000_000.u256) vmState.stateDB.setNonce(contractAccWithStorage, 2.uint64) vmState.stateDB.setCode(contractAccWithStorage, code) vmState.stateDB.setStorage(contractAccWithStorage, u256(0), u256(1234)) vmState.stateDB.setStorage(contractAccWithStorage, u256(1), u256(2345)) - let contractAccNoStorage = Hash32.fromHex("0x0000000000000000000000000000000000000003") + let contractAccNoStorage = Address.fromHex("0x0000000000000000000000000000000000000003") vmState.stateDB.setCode(contractAccNoStorage, code) @@ -128,19 +138,19 @@ proc setupEnv(com: CommonRef, signer, ks2: Address, ctx: EthContext): TestEnv = unsignedTx1 = Transaction( txType : TxLegacy, nonce : 0, - gasPrice: 30_000_000_000, + gasPrice: uint64(30_000_000_000), gasLimit: 70_000, value : 1.u256, - to : some(zeroAddress), + to : Opt.some(zeroAddress), chainId : com.chainId, ) unsignedTx2 = Transaction( txType : TxLegacy, nonce : 1, - gasPrice: 30_000_000_100, + gasPrice: uint64(30_000_000_100), gasLimit: 70_000, value : 2.u256, - to : some(zeroAddress), + to : Opt.some(zeroAddress), chainId : com.chainId, ) eip155 = com.isEIP155(com.syncCurrent) @@ -159,13 +169,10 @@ proc setupEnv(com: CommonRef, signer, ks2: Address, ctx: EthContext): TestEnv = doAssert(rc.isOk, "Invalid transaction: " & rc.error) vmState.receipts[txIndex] = makeReceipt(vmState, tx.txType) - com.db.persistReceipts(vmState.receipts) let # TODO: `getColumn(CtReceipts)` does not exists anymore. There s only the # generic `MPT` left that can be retrieved with `getGeneric()`, # optionally with argument `clearData=true` - # - receiptRoot = com.db.ctx.getColumn(CtReceipts).state(updateOk=true).valueOr(EMPTY_ROOT_HASH) date = dateTime(2017, mMar, 30) timeStamp = date.toTime.toUnix.EthTime difficulty = com.calcDifficulty(timeStamp, parent) @@ -175,37 +182,43 @@ proc setupEnv(com: CommonRef, signer, ks2: Address, ctx: EthContext): TestEnv = var header = Header( parentHash : parentHash, - #coinbase*: Address - stateRoot : vmState.stateDB.getStateRoot(), - txRoot : txRoot, - receiptsRoot : receiptsRoot, - bloom : createBloom(vmState.receipts), + stateRoot : vmState.stateDB.getStateRoot, + transactionsRoot : txRoot, + receiptsRoot : calcReceiptsRoot(vmState.receipts), + logsBloom : createBloom(vmState.receipts), difficulty : difficulty, - blockNumber : blockNumber, + number : blockNumber, gasLimit : vmState.cumulativeGasUsed + 1_000_000, gasUsed : vmState.cumulativeGasUsed, timestamp : timeStamp - #extraData: Blob - #mixHash: Hash32 - #nonce: BlockNonce ) + com.db.persistHeader(header, + com.pos.isNil, com.startOfHistory).expect("persistHeader not error") + let uncles = [header] header.ommersHash = com.db.persistUncles(uncles) - doAssert com.db.persistHeader(header, - com.consensus == ConsensusType.POS) + com.db.persistHeader(header, + com.pos.isNil, com.startOfHistory).expect("persistHeader not error") + com.db.persistFixtureBlock() + + com.db.persistent(header.number).isOkOr: + echo "Failed to save state: ", $error + quit(QuitFailure) + result = TestEnv( txHash: signedTx1.rlpHash, - blockHash: header.hash + blockHash: header.blockHash ) + proc rpcMain*() = suite "Remote Procedure Calls": # TODO: Include other transports such as Http let - conf = makeTestConfig() + conf = makeConfig(@[]) ctx = newEthContext() ethNode = setupEthNode(conf, ctx, eth) com = CommonRef.new( @@ -213,12 +226,9 @@ proc rpcMain*() = conf.networkId, conf.networkParams ) - signer = Hash32 bytes32"0x0e69cde81b1aa07a45c32c6cd85d67229d36bb1b" - ks2 = Hash32 bytes32"0xa3b2222afa5c987da6ef773fde8d01b9f23d481f" - ks3 = Hash32 bytes32"0x597176e9a64aad0845d83afdaf698fbeff77703b" - - # disable POS/post Merge feature - com.setTTD none(DifficultyInt) + signer = Address.fromHex "0x0e69cde81b1aa07a45c32c6cd85d67229d36bb1b" + ks2 = Address.fromHex "0xa3b2222afa5c987da6ef773fde8d01b9f23d481f" + ks3 = Address.fromHex "0x597176e9a64aad0845d83afdaf698fbeff77703b" let keyStore = "tests" / "keystore" let res = ctx.am.loadKeystores(keyStore) @@ -232,27 +242,33 @@ proc rpcMain*() = debugEcho unlock.error doAssert(unlock.isOk) - let env = setupEnv(com, signer, ks2, ctx) + let + env = setupEnv(signer, ks2, ctx, com) + chain = ForkedChainRef.init(com) + txPool = TxPoolRef.new(com) - # Create Ethereum RPCs - let RPC_PORT = 0 # let the OS choose a port - var - rpcServer = newRpcSocketServer(["127.0.0.1:" & $RPC_PORT]) - client = newRpcSocketClient() - txPool = TxPoolRef.new(com, conf.engineSigner) - oracle = Oracle.new(com) + # txPool must be informed of active head + # so it can know the latest account state + doAssert txPool.smartHead(chain.latestHeader, chain) - setupCommonRpc(ethNode, conf, rpcServer) - setupEthRpc(ethNode, ctx, com, txPool, oracle, rpcServer) + let + server = newRpcHttpServerWithParams("127.0.0.1:0").valueOr: + quit(QuitFailure) + serverApi = newServerAPI(chain, txPool) + + setupServerAPI(serverApi, server, ctx) + setupCommonRpc(ethNode, conf, server) + + server.start() + let client = setupClient(server.localAddress[0].port) + + # disable POS/post Merge feature + com.setTTD Opt.none(DifficultyInt) - # Begin tests - rpcServer.start() - waitFor client.connect("127.0.0.1", rpcServer.localAddress[0].port) - # TODO: add more tests here test "web3_clientVersion": let res = await client.web3_clientVersion() - check res == NimbusIdent + check res == ClientId test "web3_sha3": let data = @(NimbusName.toOpenArrayByte(0, NimbusName.len-1)) @@ -274,15 +290,6 @@ proc rpcMain*() = let peerCount = ethNode.peerPool.connectedNodes.len check res == w3Qty(peerCount) - test "eth_protocolVersion": - let res = await client.eth_protocolVersion() - # Use a hard-coded number instead of the same expression as the client, - # so that bugs introduced via that expression are detected. Using the - # same expression as the client can hide issues when the value is wrong - # in both places. When the expected value genuinely changes, it'll be - # obvious. Just change this number. - check res == $ethVersion - test "eth_chainId": let res = await client.eth_chainId() check res == w3Qty(distinctBase(com.chainId)) @@ -293,24 +300,9 @@ proc rpcMain*() = let syncing = ethNode.peerPool.connectedNodes.len > 0 check syncing == false else: - check com.syncStart == res.syncObject.startingBlock.uint64.u256 - check com.syncCurrent == res.syncObject.currentBlock.uint64.u256 - check com.syncHighest == res.syncObject.highestBlock.uint64.u256 - - test "eth_coinbase": - let res = await client.eth_coinbase() - # currently we don't have miner - check res == default(Address) - - test "eth_mining": - let res = await client.eth_mining() - # currently we don't have miner - check res == false - - test "eth_hashrate": - let res = await client.eth_hashrate() - # currently we don't have miner - check res == w3Qty(0'u64) + check com.syncStart == res.syncObject.startingBlock.uint64 + check com.syncCurrent == res.syncObject.currentBlock.uint64 + check com.syncHighest == res.syncObject.highestBlock.uint64 test "eth_gasPrice": let res = await client.eth_gasPrice() @@ -327,23 +319,23 @@ proc rpcMain*() = check res == w3Qty(0x1'u64) test "eth_getBalance": - let a = await client.eth_getBalance(Hash32.fromHex("0xfff33a3bd36abdbd412707b8e310d6011454a7ae"), blockId(0'u64)) + let a = await client.eth_getBalance(Address.fromHex("0xfff33a3bd36abdbd412707b8e310d6011454a7ae"), blockId(1'u64)) check a == UInt256.fromHex("0x1b1ae4d6e2ef5000000") - let b = await client.eth_getBalance(Hash32.fromHex("0xfff4bad596633479a2a29f9a8b3f78eefd07e6ee"), blockId(0'u64)) + let b = await client.eth_getBalance(Address.fromHex("0xfff4bad596633479a2a29f9a8b3f78eefd07e6ee"), blockId(1'u64)) check b == UInt256.fromHex("0x56bc75e2d63100000") - let c = await client.eth_getBalance(Hash32.fromHex("0xfff7ac99c8e4feb60c9750054bdc14ce1857f181"), blockId(0'u64)) + let c = await client.eth_getBalance(Address.fromHex("0xfff7ac99c8e4feb60c9750054bdc14ce1857f181"), blockId(1'u64)) check c == UInt256.fromHex("0x3635c9adc5dea00000") test "eth_getStorageAt": - let res = await client.eth_getStorageAt(Hash32.fromHex("0xfff33a3bd36abdbd412707b8e310d6011454a7ae"), 0.u256, blockId(0'u64)) - check default(Hash32) == res + let res = await client.eth_getStorageAt(Address.fromHex("0xfff33a3bd36abdbd412707b8e310d6011454a7ae"), 0.u256, blockId(1'u64)) + check FixedBytes[32](zeroHash32.data) == res test "eth_getTransactionCount": - let res = await client.eth_getTransactionCount(Hash32.fromHex("0xfff7ac99c8e4feb60c9750054bdc14ce1857f181"), blockId(0'u64)) + let res = await client.eth_getTransactionCount(Address.fromHex("0xfff7ac99c8e4feb60c9750054bdc14ce1857f181"), blockId(1'u64)) check res == w3Qty(0'u64) test "eth_getBlockTransactionCountByHash": - let hash = com.db.getBlockHash(0.BlockNumber) + let hash = com.db.getBlockHash(0'u64).expect("block hash exists") let res = await client.eth_getBlockTransactionCountByHash(hash) check res == w3Qty(0'u64) @@ -352,7 +344,7 @@ proc rpcMain*() = check res == w3Qty(0'u64) test "eth_getUncleCountByBlockHash": - let hash = com.db.getBlockHash(0.BlockNumber) + let hash = com.db.getBlockHash(0'u64).expect("block hash exists") let res = await client.eth_getUncleCountByBlockHash(hash) check res == w3Qty(0'u64) @@ -361,7 +353,7 @@ proc rpcMain*() = check res == w3Qty(0'u64) test "eth_getCode": - let res = await client.eth_getCode(Hash32.fromHex("0xfff7ac99c8e4feb60c9750054bdc14ce1857f181"), blockId(0'u64)) + let res = await client.eth_getCode(Address.fromHex("0xfff7ac99c8e4feb60c9750054bdc14ce1857f181"), blockId(1'u64)) check res.len == 0 test "eth_sign": @@ -378,18 +370,18 @@ proc rpcMain*() = let msgData = "\x19Ethereum Signed Message:\n" & $msg.len & msg let msgDataBytes = @(msgData.toOpenArrayByte(0, msgData.len-1)) let msgHash = await client.web3_sha3(msgDataBytes) - let pubkey = recover(sig, SkMessage(msgHash.bytes)).tryGet() + let pubkey = recover(sig, SkMessage(msgHash.data)).tryGet() let recoveredAddr = pubkey.toCanonicalAddress() check recoveredAddr == signer # verified test "eth_signTransaction, eth_sendTransaction, eth_sendRawTransaction": var unsignedTx = TransactionArgs( - `from`: signer.some, - to: ks2.some, - gas: w3Qty(100000'u).some, - gasPrice: none(Quantity), - value: some 100.u256, - nonce: none(Quantity) + `from`: Opt.some(signer), + to: Opt.some(ks2), + gas: Opt.some(w3Qty(100000'u)), + gasPrice: Opt.none(Quantity), + value: Opt.some(100.u256), + nonce: Opt.none(Quantity) ) let signedTxBytes = await client.eth_signTransaction(unsignedTx) @@ -402,11 +394,11 @@ proc rpcMain*() = test "eth_call": var ec = TransactionArgs( - `from`: signer.some, - to: ks2.some, - gas: w3Qty(100000'u).some, - gasPrice: none(Quantity), - value: some 100.u256 + `from`: Opt.some(signer), + to: Opt.some(ks2), + gas: Opt.some(w3Qty(100000'u)), + gasPrice: Opt.none(Quantity), + value: Opt.some(100.u256) ) let res = await client.eth_call(ec, "latest") @@ -414,11 +406,11 @@ proc rpcMain*() = test "eth_estimateGas": var ec = TransactionArgs( - `from`: signer.some, - to: ks3.some, - gas: w3Qty(42000'u).some, - gasPrice: w3Qty(100'u).some, - value: some 100.u256 + `from`: Opt.some(signer), + to: Opt.some(ks3), + gas: Opt.some(w3Qty(42000'u)), + gasPrice: Opt.some(w3Qty(100'u)), + value: Opt.some(100.u256) ) let res = await client.eth_estimateGas(ec) @@ -441,14 +433,14 @@ proc rpcMain*() = test "eth_getTransactionByHash": let res = await client.eth_getTransactionByHash(env.txHash) check res.isNil.not - check res.number.get() == w3BlockNumber(1'u64) + check res.blockNumber.get() == w3Qty(1'u64) let res2 = await client.eth_getTransactionByHash(env.blockHash) check res2.isNil test "eth_getTransactionByBlockHashAndIndex": let res = await client.eth_getTransactionByBlockHashAndIndex(env.blockHash, w3Qty(0'u64)) check res.isNil.not - check res.number.get() == w3BlockNumber(1'u64) + check res.blockNumber.get() == w3Qty(1'u64) let res2 = await client.eth_getTransactionByBlockHashAndIndex(env.blockHash, w3Qty(3'u64)) check res2.isNil @@ -459,23 +451,34 @@ proc rpcMain*() = test "eth_getTransactionByBlockNumberAndIndex": let res = await client.eth_getTransactionByBlockNumberAndIndex("latest", w3Qty(1'u64)) check res.isNil.not - check res.number.get() == w3BlockNumber(1'u64) + check res.blockNumber.get() == w3Qty(1'u64) let res2 = await client.eth_getTransactionByBlockNumberAndIndex("latest", w3Qty(3'u64)) check res2.isNil - test "eth_getTransactionReceipt": - let res = await client.eth_getTransactionReceipt(env.txHash) - check res.isNil.not - check res.number == w3BlockNumber(1'u64) + # TODO: Solved with Issue #2700 - let res2 = await client.eth_getTransactionReceipt(env.blockHash) - check res2.isNil + # test "eth_getBlockReceipts": + # let recs = await client.eth_getBlockReceipts(blockId(1'u64)) + # check recs.isSome + # if recs.isSome: + # let receipts = recs.get + # check receipts.len == 2 + # check receipts[0].transactionIndex == 0.Quantity + # check receipts[1].transactionIndex == 1.Quantity + + # test "eth_getTransactionReceipt": + # let res = await client.eth_getTransactionReceipt(env.txHash) + # check res.isNil.not + # check res.blockNumber == w3Qty(1'u64) + + # let res2 = await client.eth_getTransactionReceipt(env.blockHash) + # check res2.isNil test "eth_getUncleByBlockHashAndIndex": let res = await client.eth_getUncleByBlockHashAndIndex(env.blockHash, w3Qty(0'u64)) check res.isNil.not - check res.number == w3BlockNumber(1'u64) + check res.number == w3Qty(1'u64) let res2 = await client.eth_getUncleByBlockHashAndIndex(env.blockHash, w3Qty(1'u64)) check res2.isNil @@ -486,7 +489,7 @@ proc rpcMain*() = test "eth_getUncleByBlockNumberAndIndex": let res = await client.eth_getUncleByBlockNumberAndIndex("latest", w3Qty(0'u64)) check res.isNil.not - check res.number == w3BlockNumber(1'u64) + check res.number == w3Qty(1'u64) let res2 = await client.eth_getUncleByBlockNumberAndIndex("latest", w3Qty(1'u64)) check res2.isNil @@ -495,7 +498,7 @@ proc rpcMain*() = let testHeader = getBlockHeader4514995() let testHash = testHeader.blockHash let filterOptions = FilterOptions( - blockHash: some(testHash), + blockHash: Opt.some(testHash), topics: @[] ) let logs = await client.eth_getLogs(filterOptions) @@ -507,41 +510,19 @@ proc rpcMain*() = for l in logs: check: l.blockHash.isSome() - l.blockHash.unsafeGet() == testHash - l.logIndex.unsafeGet() == w3Qty(i.uint64) - inc i - - test "eth_getLogs by blockNumber, no filters": - let testHeader = getBlockHeader4514995() - let testHash = testHeader.blockHash - let fBlock = blockId(testHeader.number) - let tBlock = blockId(testHeader.number) - let filterOptions = FilterOptions( - fromBlock: some(fBlock), - toBlock: some(tBlock) - ) - let logs = await client.eth_getLogs(filterOptions) - - check: - len(logs) == 54 - - var i = 0 - for l in logs: - check: - l.blockHash.isSome() - l.blockHash.unsafeGet() == testHash - l.logIndex.unsafeGet() == w3Qty(i.uint64) + l.blockHash.get() == testHash + l.logIndex.get() == w3Qty(i.uint64) inc i test "eth_getLogs by blockhash, filter logs at specific positions": let testHeader = getBlockHeader4514995() let testHash = testHeader.blockHash - let topic = Hash32.fromHex("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") - let topic1 = Hash32.fromHex("0x000000000000000000000000fdc183d01a793613736cd40a5a578f49add1772b") + let topic = Bytes32.fromHex("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") + let topic1 = Bytes32.fromHex("0x000000000000000000000000fdc183d01a793613736cd40a5a578f49add1772b") let filterOptions = FilterOptions( - blockHash: some(testHash), + blockHash: Opt.some(testHash), topics: @[ TopicOrList(kind: slkList, list: @[topic]), TopicOrList(kind: slkNull), @@ -559,16 +540,16 @@ proc rpcMain*() = let testHeader = getBlockHeader4514995() let testHash = testHeader.blockHash - let topic = Hash32.fromHex("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") - let topic1 = Hash32.fromHex("0xa64da754fccf55aa65a1f0128a648633fade3884b236e879ee9f64c78df5d5d7") + let topic = Bytes32.fromHex("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") + let topic1 = Bytes32.fromHex("0xa64da754fccf55aa65a1f0128a648633fade3884b236e879ee9f64c78df5d5d7") - let topic2 = Hash32.fromHex("0x000000000000000000000000e16c02eac87920033ac72fc55ee1df3151c75786") - let topic3 = Hash32.fromHex("0x000000000000000000000000b626a5facc4de1c813f5293ec3be31979f1d1c78") + let topic2 = Bytes32.fromHex("0x000000000000000000000000e16c02eac87920033ac72fc55ee1df3151c75786") + let topic3 = Bytes32.fromHex("0x000000000000000000000000b626a5facc4de1c813f5293ec3be31979f1d1c78") let filterOptions = FilterOptions( - blockHash: some(testHash), + blockHash: Opt.some(testHash), topics: @[ TopicOrList(kind: slkList, list: @[topic, topic1]), TopicOrList(kind: slkList, list: @[topic2, topic3]) @@ -586,7 +567,7 @@ proc rpcMain*() = block: # account doesn't exist let - address = Hash32.fromHex("0x0000000000000000000000000000000000000004") + address = Address.fromHex("0x0000000000000000000000000000000000000004") proofResponse = await client.eth_getProof(address, @[], blockId(1'u64)) storageProof = proofResponse.storageProof @@ -602,7 +583,7 @@ proc rpcMain*() = block: # account exists but requested slots don't exist let - address = Hash32.fromHex("0x0000000000000000000000000000000000000001") + address = Address.fromHex("0x0000000000000000000000000000000000000001") slot1Key = 0.u256 slot2Key = 1.u256 proofResponse = await client.eth_getProof(address, @[slot1Key, slot2Key], blockId(1'u64)) @@ -626,7 +607,7 @@ proc rpcMain*() = block: # contract account with no storage slots let - address = Hash32.fromHex("0x0000000000000000000000000000000000000003") + address = Address.fromHex("0x0000000000000000000000000000000000000003") slot1Key = 0.u256 # Doesn't exist proofResponse = await client.eth_getProof(address, @[slot1Key], blockId(1'u64)) storageProof = proofResponse.storageProof @@ -649,7 +630,7 @@ proc rpcMain*() = block: # contract account with storage slots let - address = Hash32.fromHex("0x0000000000000000000000000000000000000002") + address = Address.fromHex("0x0000000000000000000000000000000000000002") slot1Key = 0.u256 slot2Key = 1.u256 slot3Key = 2.u256 # Doesn't exist @@ -680,7 +661,7 @@ proc rpcMain*() = block: # externally owned account let - address = Hash32.fromHex("0x0000000000000000000000000000000000000001") + address = Address.fromHex("0x0000000000000000000000000000000000000001") proofResponse = await client.eth_getProof(address, @[], blockId(1'u64)) storageProof = proofResponse.storageProof @@ -696,28 +677,10 @@ proc rpcMain*() = test "eth_getProof - Multiple blocks": let blockData = await client.eth_getBlockByNumber("latest", true) - block: - # block 0 - account doesn't exist yet - let - address = Hash32.fromHex("0x0000000000000000000000000000000000000002") - slot1Key = 100.u256 - proofResponse = await client.eth_getProof(address, @[slot1Key], blockId(0'u64)) - storageProof = proofResponse.storageProof - - check: - proofResponse.address == address - verifyAccountProof(blockData.stateRoot, proofResponse).kind == InvalidProof - proofResponse.balance == 0.u256 - proofResponse.codeHash == zeroHash() - proofResponse.nonce == w3Qty(0.uint64) - proofResponse.storageHash == zeroHash() - storageProof.len() == 1 - verifySlotProof(proofResponse.storageHash, storageProof[0]).kind == InvalidProof - block: # block 1 - account has balance, code and storage let - address = Hash32.fromHex("0x0000000000000000000000000000000000000002") + address = Address.fromHex("0x0000000000000000000000000000000000000002") slot2Key = 1.u256 proofResponse = await client.eth_getProof(address, @[slot2Key], blockId(1'u64)) storageProof = proofResponse.storageProof @@ -732,17 +695,13 @@ proc rpcMain*() = storageProof.len() == 1 verifySlotProof(proofResponse.storageHash, storageProof[0]).isValid() - test "eth_getBlockReceipts": - let recs = await client.eth_getBlockReceipts(blockId("latest")) - check recs.isSome - if recs.isSome: - let receipts = recs.get - check receipts.len == 2 - check receipts[0].transactionIndex == 0.Quantity - check receipts[1].transactionIndex == 1.Quantity + close(client, server) - rpcServer.stop() - rpcServer.close() +proc setErrorLevel* = + discard + when defined(chronicles_runtime_filtering) and loggingEnabled: + setLogLevel(LogLevel.ERROR) when isMainModule: + setErrorLevel() rpcMain() diff --git a/tests/test_tracer_json.nim b/tests/test_tracer_json.nim index 05a89d84eb..3676456dec 100644 --- a/tests/test_tracer_json.nim +++ b/tests/test_tracer_json.nim @@ -16,7 +16,7 @@ import results, ./test_helpers, ../nimbus/db/aristo, - ../nimbus/db/aristo/[aristo_desc, aristo_layers, aristo_nearby, aristo_part], + ../nimbus/db/aristo/[aristo_desc, aristo_layers, aristo_part], ../nimbus/db/aristo/aristo_part/part_debug, ../nimbus/db/kvt/kvt_utils, ../nimbus/[tracer, evm/types], @@ -67,49 +67,50 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) = ps.partPut(proof, AutomaticPayload).isOkOr: raiseAssert info & ": partPut => " & $error - # Handle transaction sub-tree - if txRoot.isValid: - var txs: seq[Transaction] - for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree txRoot): - let - inx = key.path.to(UInt256).truncate(uint) - tx = rlp.decode(pyl.rawBlob, Transaction) - # - # FIXME: Is this might be a bug in the test data? - # - # The single item test key is always `128`. For non-single test - # lists, the keys are `1`,`2`, ..,`N`, `128` (some single digit - # number `N`.) - # - # Unless the `128` item value is put at the start of the argument - # list `txs[]` for `persistTransactions()`, the `tracer` module - # will throw an exception at - # `doAssert(transactions.calcTxRoot == header.txRoot)` in the - # function `traceTransactionImpl()`. - # - if (inx and 0x80) != 0: - txs = @[tx] & txs - else: - txs.add tx - cdb.persistTransactions(num, txRoot, txs) - - # Handle receipts sub-tree - if rcptRoot.isValid: - var rcpts: seq[Receipt] - for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree rcptRoot): - let - inx = key.path.to(UInt256).truncate(uint) - rcpt = rlp.decode(pyl.rawBlob, Receipt) - # FIXME: See comment at `txRoot` section. - if (inx and 0x80) != 0: - rcpts = @[rcpt] & rcpts - else: - rcpts.add rcpt - cdb.persistReceipts(rcptRoot, rcpts) - - # Save keys to database - for (rvid,key) in ps.vkPairs: - adb.layersPutKey(rvid, key) + # TODO code needs updating after removal of generic payloads + # # Handle transaction sub-tree + # if txRoot.isValid: + # var txs: seq[Transaction] + # for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree txRoot): + # let + # inx = key.path.to(UInt256).truncate(uint) + # tx = rlp.decode(pyl.rawBlob, Transaction) + # # + # # FIXME: Is this might be a bug in the test data? + # # + # # The single item test key is always `128`. For non-single test + # # lists, the keys are `1`,`2`, ..,`N`, `128` (some single digit + # # number `N`.) + # # + # # Unless the `128` item value is put at the start of the argument + # # list `txs[]` for `persistTransactions()`, the `tracer` module + # # will throw an exception at + # # `doAssert(transactions.calcTxRoot == header.txRoot)` in the + # # function `traceTransactionImpl()`. + # # + # if (inx and 0x80) != 0: + # txs = @[tx] & txs + # else: + # txs.add tx + # cdb.persistTransactions(num, txRoot, txs) + + # # Handle receipts sub-tree + # if rcptRoot.isValid: + # var rcpts: seq[Receipt] + # for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree rcptRoot): + # let + # inx = key.path.to(UInt256).truncate(uint) + # rcpt = rlp.decode(pyl.rawBlob, Receipt) + # # FIXME: See comment at `txRoot` section. + # if (inx and 0x80) != 0: + # rcpts = @[rcpt] & rcpts + # else: + # rcpts.add rcpt + # cdb.persistReceipts(rcptRoot, rcpts) + + # # Save keys to database + # for (rvid,key) in ps.vkPairs: + # adb.layersPutKey(rvid, key) ps.check().isOkOr: raiseAssert info & ": check => " & $error @@ -121,7 +122,9 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) = #if true: quit() # use tracerTestGen.nim to generate additional test data -proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) = +proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) {.deprecated: "needs fixing for non-generic payloads".} = + block: + return setErrorLevel() var @@ -138,7 +141,7 @@ proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: C # Some hack for `Aristo` using the `snap` protocol proof-loader memoryDB.preLoadAristoDb(state, blockNumber) - var blk = com.db.getEthBlock(blockNumber) + var blk = com.db.getEthBlock(blockNumber).expect("eth block exists") let txTraces = traceTransactions(com, blk.header, blk.transactions) let stateDump = dumpBlockState(com, blk) diff --git a/tests/test_txpool2.nim b/tests/test_txpool2.nim index 3abd5fa2af..b652731cfa 100644 --- a/tests/test_txpool2.nim +++ b/tests/test_txpool2.nim @@ -277,7 +277,7 @@ proc runTxHeadDelta(noisy = true) = xp = env.xp com = env.com chain = env.chain - head = com.db.getCanonicalHead() + head = com.db.getCanonicalHead().expect("canonical head exists") timestamp = head.timestamp const @@ -327,7 +327,7 @@ proc runTxHeadDelta(noisy = true) = setErrorLevel() # in case we set trace level check com.syncCurrent == 10.BlockNumber - head = com.db.getBlockHeader(com.syncCurrent) + head = com.db.getBlockHeader(com.syncCurrent).expect("block header exists") let sdb = LedgerRef.init(com.db) expected = u256(txPerblock * numBlocks) * amount diff --git a/tests/test_wire_protocol.nim b/tests/test_wire_protocol.nim deleted file mode 100644 index 018c3ebf94..0000000000 --- a/tests/test_wire_protocol.nim +++ /dev/null @@ -1,51 +0,0 @@ -# Nimbus -# Copyright (c) 2019-2023 Status Research & Development GmbH -# Licensed under either of -# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or -# http://www.apache.org/licenses/LICENSE-2.0) -# * MIT license ([LICENSE-MIT](LICENSE-MIT) or -# http://opensource.org/licenses/MIT) -# at your option. This file may not be copied, modified, or distributed except -# according to those terms. - -import - eth/p2p, eth/p2p/rlpx, - chronos, testutils/unittests, - ../nimbus/sync/protocol - -var nextPort = 30303 - -proc localAddress*(port: int): Address = - let port = Port(port) - result = Address(udpPort: port, tcpPort: port, - ip: parseIpAddress("127.0.0.1")) - -proc setupTestNode*( - rng: ref HmacDrbgContext, - capabilities: varargs[ProtocolInfo, `protocolInfo`]): EthereumNode {.gcsafe.} = - # Don't create new RNG every time in production code! - let keys1 = KeyPair.random(rng[]) - var node = newEthereumNode( - keys1, localAddress(nextPort), NetworkId(1), - addAllCapabilities = false, - bindUdpPort = Port(nextPort), bindTcpPort = Port(nextPort), - rng = rng) - nextPort.inc - for capability in capabilities: - node.addCapability capability - - node - - -suite "Testing protocol handlers": - asyncTest "Failing connection handler": - let rng = newRng() - - var node1 = setupTestNode(rng, eth) - var node2 = setupTestNode(rng, eth) - node2.startListening() - let peer = await node1.rlpxConnect(newNode(node2.toENode())) - check: - peer.isNil == false - # To check if the disconnection handler did not run - #node1.protocolState(eth).count == 0 diff --git a/tools/evmstate/config.nim b/tools/evmstate/config.nim index f542306ea5..bd3fbb7329 100644 --- a/tools/evmstate/config.nim +++ b/tools/evmstate/config.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2022 Status Research & Development GmbH +# Copyright (c) 2022-2024 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -9,7 +9,7 @@ # according to those terms. import - std/[os, options], + std/[os, options, strutils], confutils, confutils/defs export @@ -79,8 +79,10 @@ type argument }: string const - Copyright = "Copyright (c) 2022 Status Research & Development GmbH" - Version = "Nimbus-evmstate 0.1.0" + Copyright = "Copyright (c) 2022-" & + CompileDate.split('-')[0] & + " Status Research & Development GmbH" + Version = "Nimbus-evmstate 0.1.2" proc init*(_: type StateConf, cmdLine = commandLineParams()): StateConf = {.push warning[ProveInit]: off.} diff --git a/tools/t8n/config.nim b/tools/t8n/config.nim index a505b04937..6c87513d36 100644 --- a/tools/t8n/config.nim +++ b/tools/t8n/config.nim @@ -1,5 +1,5 @@ # Nimbus -# Copyright (c) 2022-2023 Status Research & Development GmbH +# Copyright (c) 2022-2024 Status Research & Development GmbH # Licensed under either of # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # http://www.apache.org/licenses/LICENSE-2.0) @@ -171,8 +171,10 @@ proc convertToNimStyle(cmds: openArray[string]): seq[string] = inc i const - Copyright = "Copyright (c) 2022 Status Research & Development GmbH" - Version = "Nimbus-t8n 0.2.2" + Copyright = "Copyright (c) 2022-" & + CompileDate.split('-')[0] & + " Status Research & Development GmbH" + Version = "Nimbus-t8n 0.2.4" # force the compiler to instantiate T8NConf.load # rather than have to export parseCmdArg diff --git a/tools/t8n/helpers.nim b/tools/t8n/helpers.nim index 48df379cb9..bb77d7ef66 100644 --- a/tools/t8n/helpers.nim +++ b/tools/t8n/helpers.nim @@ -191,6 +191,7 @@ proc readValue*(r: var JsonReader[T8Conv], val: var EnvStruct) of "blockHashes": r.readValue(val.blockHashes) of "ommers": r.readValue(val.ommers) of "withdrawals": r.readValue(val.withdrawals) + of "depositContractAddress": r.readValue(val.depositContractAddress) else: discard r.readValue(JsonString) if not currentCoinbaseParsed: @@ -211,7 +212,7 @@ proc readValue*(r: var JsonReader[T8Conv], val: var TransContext) of "txs" : r.readValue(val.txsJson) of "txsRlp" : r.readValue(val.txsRlp) -proc parseTxJson(txo: TxObject, chainId: ChainID): Result[Transaction, string] = +proc parseTxJson(txo: TxObject, chainId: ChainId): Result[Transaction, string] = template required(field) = const fName = astToStr(oField) if txo.field.isNone: @@ -235,10 +236,10 @@ proc parseTxJson(txo: TxObject, chainId: ChainID): Result[Transaction, string] = required(value) required(input, payload) tx.to = txo.to - tx.chainId = chainId case tx.txType of TxLegacy: + tx.chainId = chainId required(gasPrice) of TxEip2930: required(gasPrice) @@ -263,6 +264,10 @@ proc parseTxJson(txo: TxObject, chainId: ChainID): Result[Transaction, string] = optional(accessList) required(authorizationList) + # Ignore chainId if txType == TxLegacy + if tx.txType > TxLegacy and tx.chainId != chainId: + return err("invalid chain id: have " & $tx.chainId & " want " & $chainId) + let eip155 = txo.protected.get(true) if txo.secretKey.isSome: let secretKey = PrivateKey.fromRaw(txo.secretKey.get).valueOr: @@ -274,13 +279,17 @@ proc parseTxJson(txo: TxObject, chainId: ChainID): Result[Transaction, string] = required(s, S) ok(tx) -proc readNestedTx(rlp: var Rlp): Result[Transaction, string] = +proc readNestedTx(rlp: var Rlp, chainId: ChainId): Result[Transaction, string] = try: - ok if rlp.isList: + let tx = if rlp.isList: rlp.read(Transaction) else: var rr = rlpFromBytes(rlp.read(seq[byte])) rr.read(Transaction) + # Ignore chainId if txType == TxLegacy + if tx.txType > TxLegacy and tx.chainId != chainId: + return err("invalid chain id: have " & $tx.chainId & " want " & $chainId) + ok(tx) except RlpError as exc: err(exc.msg) @@ -301,40 +310,38 @@ proc parseTxs*(ctx: var TransContext, chainId: ChainId) if ctx.txsRlp.len > 0: for item in rlp: - ctx.txList.add rlp.readNestedTx() + ctx.txList.add rlp.readNestedTx(chainId) proc filterGoodTransactions*(ctx: TransContext): seq[Transaction] = for txRes in ctx.txList: if txRes.isOk: result.add txRes.get -template wrapException(procName: string, body) = +template wrapException(body) = try: body except SerializationError as exc: - debugEcho "procName: ", procName raise newError(ErrorJson, exc.msg) except IOError as exc: - debugEcho "procName: ", procName raise newError(ErrorJson, exc.msg) proc parseTxsJson*(ctx: var TransContext, jsonFile: string) {.raises: [T8NError].} = - wrapException("parseTxsJson"): + wrapException: ctx.txsJson = T8Conv.loadFile(jsonFile, seq[TxObject]) proc parseAlloc*(ctx: var TransContext, allocFile: string) {.raises: [T8NError].} = - wrapException("parseAlloc"): + wrapException: ctx.alloc = T8Conv.loadFile(allocFile, GenesisAlloc) proc parseEnv*(ctx: var TransContext, envFile: string) {.raises: [T8NError].} = - wrapException("parseEnv"): + wrapException: ctx.env = T8Conv.loadFile(envFile, EnvStruct) proc parseTxsRlp*(ctx: var TransContext, hexData: string) {.raises: [ValueError].} = ctx.txsRlp = hexToSeqByte(hexData) proc parseInputFromStdin*(ctx: var TransContext) {.raises: [T8NError].} = - wrapException("parseInputFromStdin"): + wrapException: let jsonData = stdin.readAll() ctx = T8Conv.decode(jsonData, TransContext) @@ -435,6 +442,11 @@ proc `@@`[T](x: seq[T]): JsonNode = for c in x: result.add @@(c) +proc `@@`[N, T](x: array[N, T]): JsonNode = + result = newJArray() + for c in x: + result.add @@(c) + proc `@@`[T](x: Opt[T]): JsonNode = if x.isNone: newJNull() @@ -464,3 +476,5 @@ proc `@@`*(x: ExecutionResult): JsonNode = result["blobGasUsed"] = @@(x.blobGasUsed) if x.requestsHash.isSome: result["requestsHash"] = @@(x.requestsHash) + if x.requests.isSome: + result["requests"] = @@(x.requests) diff --git a/tools/t8n/t8n_test.nim b/tools/t8n/t8n_test.nim index e47c565a6c..7840402cdd 100644 --- a/tools/t8n/t8n_test.nim +++ b/tools/t8n/t8n_test.nim @@ -20,6 +20,7 @@ type inEnv : string stFork : string stReward: string + chainid : string T8nOutput = object alloc : bool @@ -39,13 +40,15 @@ type path: string error: string -proc t8nInput(alloc, txs, env, fork, reward: string): T8nInput = +proc t8nInput(alloc, txs, env, fork: string; + reward = "0"; chainid = ""): T8nInput = T8nInput( inAlloc : alloc, inTxs : txs, inEnv : env, stFork : fork, - stReward: reward + stReward: reward, + chainid : chainid, ) proc get(opt: T8nInput, base : string): string = @@ -55,6 +58,8 @@ proc get(opt: T8nInput, base : string): string = result.add(" --state.fork " & opt.stFork) if opt.stReward.len > 0: result.add(" --state.reward " & opt.stReward) + if opt.chainid.len > 0: + result.add(" --state.chainid " & opt.chainid) proc get(opt: T8nOutput): string = if opt.alloc and not opt.trace: @@ -165,14 +170,19 @@ proc runTest(appDir: string, spec: TestSpec): bool = if spec.expOut.len > 0: if spec.expOut.endsWith(".json"): let path = base / spec.expOut - let want = json.parseFile(path) - let have = json.parseJson(res) - var jsc = JsonComparator() - if not jsc.cmp(want, have, "root") and notRejectedError(jsc.path): - echo "test $1: output wrong, have \n$2\nwant\n$3\n" % - [spec.name, have.pretty, want.pretty] - echo "path: $1, error: $2" % - [jsc.path, jsc.error] + try: + let want = json.parseFile(path) + let have = json.parseJson(res) + var jsc = JsonComparator() + if not jsc.cmp(want, have, "root") and notRejectedError(jsc.path): + echo "test $1: output wrong, have \n$2\nwant\n$3\n" % + [spec.name, have.pretty, want.pretty] + echo "path: $1, error: $2" % + [jsc.path, jsc.error] + return false + except JsonParsingError as exc: + echo "test $1: ERROR: $2" % [spec.name, exc.msg] + echo "test $1: OUTPUT: $2" % [spec.name, res] return false else: # compare as regular text @@ -496,7 +506,7 @@ const name : "GasUsedHigherThanBlockGasLimitButNotWithRefundsSuicideLast_Frontier", base : "testdata/00-516", input : t8nInput( - "alloc.json", "txs.rlp", "env.json", "Frontier", "5000000000000000000", + "alloc.json", "txs.rlp", "env.json", "Frontier", "5000000000000000000" ), output: T8nOutput(alloc: true, result: true), expOut: "exp.json", @@ -609,6 +619,33 @@ const output: T8nOutput(result: true), expOut: "exp.json", ), + TestSpec( + name : "Different --state.chainid and tx.chainid", + base : "testdata/00-525", + input : t8nInput( + "alloc.json", "txs.rlp", "env.json", "Prague", + ), + output: T8nOutput(result: true), + expOut: "exp1.json", + ), + TestSpec( + name : "Prague execution requests", + base : "testdata/00-525", + input : t8nInput( + "alloc.json", "txs.rlp", "env.json", "Prague", "", "7078815900" + ), + output: T8nOutput(result: true), + expOut: "exp2.json", + ), + TestSpec( + name : "Prague depositContractAddress", + base : "testdata/00-525", + input : t8nInput( + "alloc.json", "txs.rlp", "env_dca.json", "Prague", "", "7078815900" + ), + output: T8nOutput(result: true), + expOut: "exp3.json", + ), ] proc main() = diff --git a/tools/t8n/testdata/00-519/exp.txt b/tools/t8n/testdata/00-519/exp.txt index d9509a3367..253d4518b1 100644 --- a/tools/t8n/testdata/00-519/exp.txt +++ b/tools/t8n/testdata/00-519/exp.txt @@ -1,2 +1,2 @@ {"pc":0,"op":0,"gas":"0x0","gasCost":"0xfffffffffffecb68","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"STOP","error":"PrcInvalidParam"} -{"output":"","gasUsed":"0x13498","error":"PrcInvalidParam"} +{"output":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","gasUsed":"0x13498","error":"PrcInvalidParam"} diff --git a/tools/t8n/testdata/00-525/alloc.json b/tools/t8n/testdata/00-525/alloc.json new file mode 100644 index 0000000000..ecfe5f9c80 --- /dev/null +++ b/tools/t8n/testdata/00-525/alloc.json @@ -0,0 +1,72 @@ +{ + "0xcf49fda3be353c69b41ed96333cd24302da4556f" : { + "balance" : "0xF51370DC5C37F0000", + "code" : "0x", + "nonce" : "0x07", + "storage" : { + } + }, + "0x4242424242424242424242424242424242424242": { + "balance": "0", + "code": "0x60806040526004361061003f5760003560e01c806301ffc9a71461004457806322895118146100a4578063621fd130146101ba578063c5f2892f14610244575b600080fd5b34801561005057600080fd5b506100906004803603602081101561006757600080fd5b50357fffffffff000000000000000000000000000000000000000000000000000000001661026b565b604080519115158252519081900360200190f35b6101b8600480360360808110156100ba57600080fd5b8101906020810181356401000000008111156100d557600080fd5b8201836020820111156100e757600080fd5b8035906020019184600183028401116401000000008311171561010957600080fd5b91939092909160208101903564010000000081111561012757600080fd5b82018360208201111561013957600080fd5b8035906020019184600183028401116401000000008311171561015b57600080fd5b91939092909160208101903564010000000081111561017957600080fd5b82018360208201111561018b57600080fd5b803590602001918460018302840111640100000000831117156101ad57600080fd5b919350915035610304565b005b3480156101c657600080fd5b506101cf6110b5565b6040805160208082528351818301528351919283929083019185019080838360005b838110156102095781810151838201526020016101f1565b50505050905090810190601f1680156102365780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561025057600080fd5b506102596110c7565b60408051918252519081900360200190f35b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f01ffc9a70000000000000000000000000000000000000000000000000000000014806102fe57507fffffffff0000000000000000000000000000000000000000000000000000000082167f8564090700000000000000000000000000000000000000000000000000000000145b92915050565b6030861461035d576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118056026913960400191505060405180910390fd5b602084146103b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252603681526020018061179c6036913960400191505060405180910390fd5b6060821461040f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260298152602001806118786029913960400191505060405180910390fd5b670de0b6b3a7640000341015610470576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260268152602001806118526026913960400191505060405180910390fd5b633b9aca003406156104cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260338152602001806117d26033913960400191505060405180910390fd5b633b9aca00340467ffffffffffffffff811115610535576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252602781526020018061182b6027913960400191505060405180910390fd5b6060610540826114ba565b90507f649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c589898989858a8a6105756020546114ba565b6040805160a0808252810189905290819060208201908201606083016080840160c085018e8e80828437600083820152601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690910187810386528c815260200190508c8c808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01690920188810386528c5181528c51602091820193918e019250908190849084905b83811015610648578181015183820152602001610630565b50505050905090810190601f1680156106755780820380516001836020036101000a031916815260200191505b5086810383528881526020018989808284376000838201819052601f9091017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169092018881038452895181528951602091820193918b019250908190849084905b838110156106ef5781810151838201526020016106d7565b50505050905090810190601f16801561071c5780820380516001836020036101000a031916815260200191505b509d505050505050505050505050505060405180910390a1600060028a8a600060801b604051602001808484808284377fffffffffffffffffffffffffffffffff0000000000000000000000000000000090941691909301908152604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0818403018152601090920190819052815191955093508392506020850191508083835b602083106107fc57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016107bf565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610859573d6000803e3d6000fd5b5050506040513d602081101561086e57600080fd5b5051905060006002806108846040848a8c6116fe565b6040516020018083838082843780830192505050925050506040516020818303038152906040526040518082805190602001908083835b602083106108f857805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016108bb565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610955573d6000803e3d6000fd5b5050506040513d602081101561096a57600080fd5b5051600261097b896040818d6116fe565b60405160009060200180848480828437919091019283525050604080518083038152602092830191829052805190945090925082918401908083835b602083106109f457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090920191602091820191016109b7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610a51573d6000803e3d6000fd5b5050506040513d6020811015610a6657600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610ada57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610a9d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610b37573d6000803e3d6000fd5b5050506040513d6020811015610b4c57600080fd5b50516040805160208101858152929350600092600292839287928f928f92018383808284378083019250505093505050506040516020818303038152906040526040518082805190602001908083835b60208310610bd957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610b9c565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610c36573d6000803e3d6000fd5b5050506040513d6020811015610c4b57600080fd5b50516040518651600291889160009188916020918201918291908601908083835b60208310610ca957805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610c6c565b6001836020036101000a0380198251168184511680821785525050505050509050018367ffffffffffffffff191667ffffffffffffffff1916815260180182815260200193505050506040516020818303038152906040526040518082805190602001908083835b60208310610d4e57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610d11565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610dab573d6000803e3d6000fd5b5050506040513d6020811015610dc057600080fd5b5051604080516020818101949094528082019290925280518083038201815260609092019081905281519192909182918401908083835b60208310610e3457805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610df7565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015610e91573d6000803e3d6000fd5b5050506040513d6020811015610ea657600080fd5b50519050858114610f02576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260548152602001806117486054913960600191505060405180910390fd5b60205463ffffffff11610f60576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260218152602001806117276021913960400191505060405180910390fd5b602080546001019081905560005b60208110156110a9578160011660011415610fa0578260008260208110610f9157fe5b0155506110ac95505050505050565b600260008260208110610faf57fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061102557805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101610fe8565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa158015611082573d6000803e3d6000fd5b5050506040513d602081101561109757600080fd5b50519250600282049150600101610f6e565b50fe5b50505050505050565b60606110c26020546114ba565b905090565b6020546000908190815b60208110156112f05781600116600114156111e6576002600082602081106110f557fe5b01548460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061116b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161112e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156111c8573d6000803e3d6000fd5b5050506040513d60208110156111dd57600080fd5b505192506112e2565b600283602183602081106111f657fe5b015460405160200180838152602001828152602001925050506040516020818303038152906040526040518082805190602001908083835b6020831061126b57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161122e565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa1580156112c8573d6000803e3d6000fd5b5050506040513d60208110156112dd57600080fd5b505192505b6002820491506001016110d1565b506002826112ff6020546114ba565b600060401b6040516020018084815260200183805190602001908083835b6020831061135a57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0909201916020918201910161131d565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000095909516920191825250604080518083037ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8018152601890920190819052815191955093508392850191508083835b6020831061143f57805182527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09092019160209182019101611402565b51815160209384036101000a7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff01801990921691161790526040519190930194509192505080830381855afa15801561149c573d6000803e3d6000fd5b5050506040513d60208110156114b157600080fd5b50519250505090565b60408051600880825281830190925260609160208201818036833701905050905060c082901b8060071a60f81b826000815181106114f457fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060061a60f81b8260018151811061153757fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060051a60f81b8260028151811061157a57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060041a60f81b826003815181106115bd57fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060031a60f81b8260048151811061160057fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060021a60f81b8260058151811061164357fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060011a60f81b8260068151811061168657fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053508060001a60f81b826007815181106116c957fe5b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535050919050565b6000808585111561170d578182fd5b83861115611719578182fd5b505082019391909203915056fe4465706f736974436f6e74726163743a206d65726b6c6520747265652066756c6c4465706f736974436f6e74726163743a207265636f6e7374727563746564204465706f7369744461746120646f6573206e6f74206d6174636820737570706c696564206465706f7369745f646174615f726f6f744465706f736974436f6e74726163743a20696e76616c6964207769746864726177616c5f63726564656e7469616c73206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c7565206e6f74206d756c7469706c65206f6620677765694465706f736974436f6e74726163743a20696e76616c6964207075626b6579206c656e6774684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f20686967684465706f736974436f6e74726163743a206465706f7369742076616c756520746f6f206c6f774465706f736974436f6e74726163743a20696e76616c6964207369676e6174757265206c656e677468a2646970667358221220dceca8706b29e917dacf25fceef95acac8d90d765ac926663ce4096195952b6164736f6c634300060b0033", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000022": "0xf5a5fd42d16a20302798ef6ed309979b43003d2320d9f0e8ea9831a92759fb4b", + "0x0000000000000000000000000000000000000000000000000000000000000023": "0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71", + "0x0000000000000000000000000000000000000000000000000000000000000024": "0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c", + "0x0000000000000000000000000000000000000000000000000000000000000025": "0x536d98837f2dd165a55d5eeae91485954472d56f246df256bf3cae19352a123c", + "0x0000000000000000000000000000000000000000000000000000000000000026": "0x9efde052aa15429fae05bad4d0b1d7c64da64d03d7a1854a588c2cb8430c0d30", + "0x0000000000000000000000000000000000000000000000000000000000000027": "0xd88ddfeed400a8755596b21942c1497e114c302e6118290f91e6772976041fa1", + "0x0000000000000000000000000000000000000000000000000000000000000028": "0x87eb0ddba57e35f6d286673802a4af5975e22506c7cf4c64bb6be5ee11527f2c", + "0x0000000000000000000000000000000000000000000000000000000000000029": "0x26846476fd5fc54a5d43385167c95144f2643f533cc85bb9d16b782f8d7db193", + "0x000000000000000000000000000000000000000000000000000000000000002a": "0x506d86582d252405b840018792cad2bf1259f1ef5aa5f887e13cb2f0094f51e1", + "0x000000000000000000000000000000000000000000000000000000000000002b": "0xffff0ad7e659772f9534c195c815efc4014ef1e1daed4404c06385d11192e92b", + "0x000000000000000000000000000000000000000000000000000000000000002c": "0x6cf04127db05441cd833107a52be852868890e4317e6a02ab47683aa75964220", + "0x000000000000000000000000000000000000000000000000000000000000002d": "0xb7d05f875f140027ef5118a2247bbb84ce8f2f0f1123623085daf7960c329f5f", + "0x000000000000000000000000000000000000000000000000000000000000002e": "0xdf6af5f5bbdb6be9ef8aa618e4bf8073960867171e29676f8b284dea6a08a85e", + "0x000000000000000000000000000000000000000000000000000000000000002f": "0xb58d900f5e182e3c50ef74969ea16c7726c549757cc23523c369587da7293784", + "0x0000000000000000000000000000000000000000000000000000000000000030": "0xd49a7502ffcfb0340b1d7885688500ca308161a7f96b62df9d083b71fcc8f2bb", + "0x0000000000000000000000000000000000000000000000000000000000000031": "0x8fe6b1689256c0d385f42f5bbe2027a22c1996e110ba97c171d3e5948de92beb", + "0x0000000000000000000000000000000000000000000000000000000000000032": "0x8d0d63c39ebade8509e0ae3c9c3876fb5fa112be18f905ecacfecb92057603ab", + "0x0000000000000000000000000000000000000000000000000000000000000033": "0x95eec8b2e541cad4e91de38385f2e046619f54496c2382cb6cacd5b98c26f5a4", + "0x0000000000000000000000000000000000000000000000000000000000000034": "0xf893e908917775b62bff23294dbbe3a1cd8e6cc1c35b4801887b646a6f81f17f", + "0x0000000000000000000000000000000000000000000000000000000000000035": "0xcddba7b592e3133393c16194fac7431abf2f5485ed711db282183c819e08ebaa", + "0x0000000000000000000000000000000000000000000000000000000000000036": "0x8a8d7fe3af8caa085a7639a832001457dfb9128a8061142ad0335629ff23ff9c", + "0x0000000000000000000000000000000000000000000000000000000000000037": "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0x0000000000000000000000000000000000000000000000000000000000000038": "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x0000000000000000000000000000000000000000000000000000000000000039": "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x000000000000000000000000000000000000000000000000000000000000003a": "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x000000000000000000000000000000000000000000000000000000000000003b": "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x000000000000000000000000000000000000000000000000000000000000003c": "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x000000000000000000000000000000000000000000000000000000000000003d": "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x000000000000000000000000000000000000000000000000000000000000003e": "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0x000000000000000000000000000000000000000000000000000000000000003f": "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x0000000000000000000000000000000000000000000000000000000000000040": "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7" + } + }, + "0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02": { + "balance": "0", + "nonce": "1", + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500" + }, + "0x0aae40965e6800cd9b1f4b05ff21581047e3f91e": { + "balance": "0", + "nonce": "1", + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460575767ffffffffffffffff5f3511605357600143035f3511604b575f35612000014311604b57611fff5f3516545f5260205ff35b5f5f5260205ff35b5f5ffd5b5f35611fff60014303165500" + }, + "0x09Fc772D0857550724b07B850a4323f39112aAaA": { + "balance": "0", + "nonce": "1", + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460c7573615156028575f545f5260205ff35b36603814156101f05760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff146101f057600182026001905f5b5f821115608057810190830284830290049160010191906065565b9093900434106101f057600154600101600155600354806003026004013381556001015f35815560010160203590553360601b5f5260385f601437604c5fa0600101600355005b6003546002548082038060101160db575060105b5f5b81811461017f5780604c02838201600302600401805490600101805490600101549160601b83528260140152807fffffffffffffffffffffffffffffffff0000000000000000000000000000000016826034015260401c906044018160381c81600701538160301c81600601538160281c81600501538160201c81600401538160181c81600301538160101c81600201538160081c81600101535360010160dd565b9101809214610191579060025561019c565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14156101c957505f5b6001546002828201116101de5750505f6101e4565b01600290035b5f555f600155604c025ff35b5f5ffd", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + }, + "0x01aBEa29659e5e97C95107F20bb753cD3e09bBBb": { + "balance": "0", + "nonce": "1", + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460cf573615156028575f545f5260205ff35b366060141561019a5760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1461019a57600182026001905f5b5f821115608057810190830284830290049160010191906065565b90939004341061019a57600154600101600155600354806004026004013381556001015f358155600101602035815560010160403590553360601b5f5260605f60143760745fa0600101600355005b6003546002548082038060011160e3575060015b5f5b8181146101295780607402838201600402600401805490600101805490600101805490600101549260601b84529083601401528260340152906054015260010160e5565b910180921461013b5790600255610146565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff141561017357505f5b6001546001828201116101885750505f61018e565b01600190035b5f555f6001556074025ff35b5f5ffd", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + } + } +} diff --git a/tools/t8n/testdata/00-525/env.json b/tools/t8n/testdata/00-525/env.json new file mode 100644 index 0000000000..5b72a6929d --- /dev/null +++ b/tools/t8n/testdata/00-525/env.json @@ -0,0 +1,23 @@ +{ + "blockHashes": { + }, + "currentBaseFee": "0x7", + "currentBlobGasUsed": "0x0", + "currentCoinbase": "0xf97e180c050e5ab072211ad2c213eb5aee4df134", + "currentDifficulty": "0x0", + "currentExcessBlobGas": "0x2b80000", + "currentGasLimit": "0x1c9c380", + "currentNumber": "0x1", + "currentRandom": "0x89e03b95990c23f0dbc63247c17d9699464d25d10f9d24a676265bdd82bfb18a", + "currentTimestamp": "0x672bc100", + "parentBaseFee": "0x7", + "parentBeaconBlockRoot": "0x4368de4abb7ed9d11d92d42713a3b3c6e0a271e20d6de882161c2ae1b3a9051f", + "parentBlobGasUsed": "0xc0000", + "parentDifficulty": "0x0", + "parentExcessBlobGas": "0x2b20000", + "parentGasLimit": "0x1c9c380", + "parentGasUsed": "0x430f97", + "parentTimestamp": "0x672bc0f4", + "withdrawals": [ + ] +} diff --git a/tools/t8n/testdata/00-525/env_dca.json b/tools/t8n/testdata/00-525/env_dca.json new file mode 100644 index 0000000000..f17788daf2 --- /dev/null +++ b/tools/t8n/testdata/00-525/env_dca.json @@ -0,0 +1,24 @@ +{ + "blockHashes": { + }, + "currentBaseFee": "0x7", + "currentBlobGasUsed": "0x0", + "currentCoinbase": "0xf97e180c050e5ab072211ad2c213eb5aee4df134", + "currentDifficulty": "0x0", + "currentExcessBlobGas": "0x2b80000", + "currentGasLimit": "0x1c9c380", + "currentNumber": "0x1", + "currentRandom": "0x89e03b95990c23f0dbc63247c17d9699464d25d10f9d24a676265bdd82bfb18a", + "currentTimestamp": "0x672bc100", + "parentBaseFee": "0x7", + "parentBeaconBlockRoot": "0x4368de4abb7ed9d11d92d42713a3b3c6e0a271e20d6de882161c2ae1b3a9051f", + "parentBlobGasUsed": "0xc0000", + "parentDifficulty": "0x0", + "parentExcessBlobGas": "0x2b20000", + "parentGasLimit": "0x1c9c380", + "parentGasUsed": "0x430f97", + "parentTimestamp": "0x672bc0f4", + "withdrawals": [ + ], + "depositContractAddress": "0x4242424242424242424242424242424242424242" +} diff --git a/tools/t8n/testdata/00-525/exp1.json b/tools/t8n/testdata/00-525/exp1.json new file mode 100644 index 0000000000..31a149c09d --- /dev/null +++ b/tools/t8n/testdata/00-525/exp1.json @@ -0,0 +1,28 @@ +{ + "result": { + "stateRoot": "0x08fd46faf1bc68f8f3b6c78710d91b825cb22375aa8a3db99ddd9084cbbbfd75", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": null, + "gasUsed": "0x0", + "rejected": [ + { + "index": 0, + "error": "invalid chain id: have 7078815900 want 1" + } + ], + "currentBaseFee": "0x7", + "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "currentExcessBlobGas": "0x2b80000", + "blobGasUsed": "0x0", + "requestsHash": "0x6036c41849da9c076ed79654d434017387a88fb833c2856b32e18218b3341c5f", + "requests": [ + "0x", + "0x", + "0x" + ] + } +} diff --git a/tools/t8n/testdata/00-525/exp2.json b/tools/t8n/testdata/00-525/exp2.json new file mode 100644 index 0000000000..a3ee77a377 --- /dev/null +++ b/tools/t8n/testdata/00-525/exp2.json @@ -0,0 +1,44 @@ +{ + "result": { + "stateRoot": "0xdbeeef4c53f45167aea35bccfa9329f4a9b59fa7b115dc2f6462eea7316c2600", + "txRoot": "0xe67cc7032923c32bc107773c80dbe2afc754eeed4203ccee50605b9fe15a5bb5", + "receiptsRoot": "0xead5884f735e5e8e703878f0c89b54919d06c2cee8e373d632e730d42b380e0c", + "logsHash": "0x6fb31d3ef4580565e54b679a3bb580d1a4c345faab2cfc7868304e0e5da23f4c", + "logsBloom": "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x14aea", + "logsBloom": "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000", + "logs": [ + { + "address": "0x4242424242424242424242424242424242424242", + "topics": [ + "0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000003081521c60874daf5b425c21e44caf045c4d475e8b33a557a28cee3c46ef9cf9bd95b4c75a0bb629981b40d0102452dd4c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020020000000000000000000000332e43696a505ef45b9319973785f837ce5267b90000000000000000000000000000000000000000000000000000000000000008000065cd1d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000608c8f2647f342d2c3e8fd07c6b3b9b16383ac11c4be6a6962c7fc18a789daee5fac20ee0bbe4a10383759aaffacacb72b0d67f998730cdf4995fe73afe434dfce2803b343606f67fc4995597c0af9e0fe9ed00006e5889bec29171f670e7d9be200000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000" + } + ], + "transactionHash": "0x75b5508fdcec7682f238fb1ccdc9a087f3f8b601fd52d5c0684123698d89f0a6", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x14aea", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0", + "type": "0x2" + } + ], + "currentDifficulty": null, + "gasUsed": "0x14aea", + "currentBaseFee": "0x7", + "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "currentExcessBlobGas": "0x2b80000", + "blobGasUsed": "0x0", + "requestsHash": "0x6036c41849da9c076ed79654d434017387a88fb833c2856b32e18218b3341c5f", + "requests": [ + "0x", + "0x", + "0x" + ] + } +} diff --git a/tools/t8n/testdata/00-525/exp3.json b/tools/t8n/testdata/00-525/exp3.json new file mode 100644 index 0000000000..f45453427f --- /dev/null +++ b/tools/t8n/testdata/00-525/exp3.json @@ -0,0 +1,44 @@ +{ + "result": { + "stateRoot": "0xdbeeef4c53f45167aea35bccfa9329f4a9b59fa7b115dc2f6462eea7316c2600", + "txRoot": "0xe67cc7032923c32bc107773c80dbe2afc754eeed4203ccee50605b9fe15a5bb5", + "receiptsRoot": "0xead5884f735e5e8e703878f0c89b54919d06c2cee8e373d632e730d42b380e0c", + "logsHash": "0x6fb31d3ef4580565e54b679a3bb580d1a4c345faab2cfc7868304e0e5da23f4c", + "logsBloom": "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x14aea", + "logsBloom": "0x10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000", + "logs": [ + { + "address": "0x4242424242424242424242424242424242424242", + "topics": [ + "0x649bbc62d0e31342afea4e5cd82d4049e7e1ee912fc0889aa790803be39038c5" + ], + "data": "0x00000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000003081521c60874daf5b425c21e44caf045c4d475e8b33a557a28cee3c46ef9cf9bd95b4c75a0bb629981b40d0102452dd4c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020020000000000000000000000332e43696a505ef45b9319973785f837ce5267b90000000000000000000000000000000000000000000000000000000000000008000065cd1d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000608c8f2647f342d2c3e8fd07c6b3b9b16383ac11c4be6a6962c7fc18a789daee5fac20ee0bbe4a10383759aaffacacb72b0d67f998730cdf4995fe73afe434dfce2803b343606f67fc4995597c0af9e0fe9ed00006e5889bec29171f670e7d9be200000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000" + } + ], + "transactionHash": "0x75b5508fdcec7682f238fb1ccdc9a087f3f8b601fd52d5c0684123698d89f0a6", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x14aea", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0", + "type": "0x2" + } + ], + "currentDifficulty": null, + "gasUsed": "0x14aea", + "currentBaseFee": "0x7", + "withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "currentExcessBlobGas": "0x2b80000", + "blobGasUsed": "0x0", + "requestsHash": "0xa5c19ed76c01fe25a67b7ef8c227caa34951e8c7e74168408b5be0861dac686d", + "requests": [ + "0x81521c60874daf5b425c21e44caf045c4d475e8b33a557a28cee3c46ef9cf9bd95b4c75a0bb629981b40d0102452dd4c020000000000000000000000332e43696a505ef45b9319973785f837ce5267b9000065cd1d0000008c8f2647f342d2c3e8fd07c6b3b9b16383ac11c4be6a6962c7fc18a789daee5fac20ee0bbe4a10383759aaffacacb72b0d67f998730cdf4995fe73afe434dfce2803b343606f67fc4995597c0af9e0fe9ed00006e5889bec29171f670e7d9be20000000000000000", + "0x", + "0x" + ] + } +} diff --git a/tools/t8n/testdata/00-525/txs.rlp b/tools/t8n/testdata/00-525/txs.rlp new file mode 100644 index 0000000000..6410c11e61 --- /dev/null +++ b/tools/t8n/testdata/00-525/txs.rlp @@ -0,0 +1 @@ +"0xf90226b9022302f9021f8501a5ee289c07840e07899f840e07899f8302c0c19442424242424242424242424242424242424242428906f05b59d3b2000000b901a422895118000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000001206c240a071b7048221c3f53bb0e66debee5330def076d51c9958c776e3f13d5d4000000000000000000000000000000000000000000000000000000000000003081521c60874daf5b425c21e44caf045c4d475e8b33a557a28cee3c46ef9cf9bd95b4c75a0bb629981b40d0102452dd4c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020020000000000000000000000332e43696a505ef45b9319973785f837ce5267b900000000000000000000000000000000000000000000000000000000000000608c8f2647f342d2c3e8fd07c6b3b9b16383ac11c4be6a6962c7fc18a789daee5fac20ee0bbe4a10383759aaffacacb72b0d67f998730cdf4995fe73afe434dfce2803b343606f67fc4995597c0af9e0fe9ed00006e5889bec29171f670e7d9be2c001a0e36549b612e7fd2c1b0635025782f6c4841317a1a4b4614b3a39b95754fca7f6a014ca51a1714612ee97e1b5a785bd0c8e388f46539d36cc8ef014127a0bfcf05a" \ No newline at end of file diff --git a/tools/t8n/transition.nim b/tools/t8n/transition.nim index 85f312c372..58ab9bcc87 100644 --- a/tools/t8n/transition.nim +++ b/tools/t8n/transition.nim @@ -71,9 +71,7 @@ proc dispatchOutput(ctx: TransContext, conf: T8NConf, res: ExecOutput) = dis.dispatch(conf.outputBaseDir, conf.outputAlloc, "alloc", @@(res.alloc)) dis.dispatch(conf.outputBaseDir, conf.outputResult, "result", @@(res.result)) - let chainId = conf.stateChainId.ChainId let txList = ctx.filterGoodTransactions() - let body = @@(rlp.encode(txList)) dis.dispatch(conf.outputBaseDir, conf.outputBody, "body", body) @@ -152,7 +150,7 @@ proc defaultTraceStreamFilename(conf: T8NConf, txIndex: int, txHash: Hash32): (string, string) = let - txHash = "0x" & toLowerAscii($txHash) + txHash = toLowerAscii($txHash) baseDir = if conf.outputBaseDir.len > 0: conf.outputBaseDir else: @@ -169,8 +167,9 @@ proc traceToFileStream(path: string, txIndex: int): Stream = # replace whatever `.ext` to `-${txIndex}.jsonl` let file = path.splitFile - fName = "$1/$2-$3.jsonl" % [file.dir, file.name, $txIndex] - createDir(file.dir) + folder = if file.dir.len == 0: "." else: file.dir + fName = "$1/$2-$3.jsonl" % [folder, file.name, $txIndex] + if file.dir.len > 0: createDir(file.dir) newFileStream(fName, fmWrite) proc setupTrace(conf: T8NConf, txIndex: int, txHash: Hash32, vmState: BaseVMState): bool = @@ -357,10 +356,11 @@ proc exec(ctx: TransContext, for rec in result.result.receipts: allLogs.add rec.logs let - depositReqs = parseDepositLogs(allLogs).valueOr: + depositReqs = parseDepositLogs(allLogs, vmState.com.depositContractAddress).valueOr: raise newError(ErrorEVM, error) - requestsHash = calcRequestsHashInsertType(depositReqs, withdrawalReqs, consolidationReqs) + requestsHash = calcRequestsHash(depositReqs, withdrawalReqs, consolidationReqs) result.result.requestsHash = Opt.some(requestsHash) + result.result.requests = Opt.some([depositReqs, withdrawalReqs, consolidationReqs]) template wrapException(body: untyped) = when wrapExceptionEnabled: @@ -426,11 +426,6 @@ proc transitionAction*(ctx: var TransContext, conf: T8NConf) = if conf.inputAlloc.len == 0 and conf.inputEnv.len == 0 and conf.inputTxs.len == 0: raise newError(ErrorConfig, "either one of input is needeed(alloc, txs, or env)") - let config = parseChainConfig(conf.stateFork) - config.chainId = conf.stateChainId.ChainId - - let com = CommonRef.new(newCoreDbRef DefaultDbMemory, config) - # We need to load three things: alloc, env and transactions. # May be either in stdin input or in files. @@ -448,7 +443,7 @@ proc transitionAction*(ctx: var TransContext, conf: T8NConf) = if conf.inputTxs != stdinSelector and conf.inputTxs.len > 0: if conf.inputTxs.endsWith(".rlp"): let data = readFile(conf.inputTxs) - ctx.parseTxsRlp(data.strip(chars={'"'})) + ctx.parseTxsRlp(data.strip(chars={'"', ' ', '\r', '\n', '\t'})) else: ctx.parseTxsJson(conf.inputTxs) @@ -467,6 +462,12 @@ proc transitionAction*(ctx: var TransContext, conf: T8NConf) = excessBlobGas: ctx.env.parentExcessBlobGas, ) + let config = parseChainConfig(conf.stateFork) + config.depositContractAddress = ctx.env.depositContractAddress + config.chainId = conf.stateChainId.ChainId + + let com = CommonRef.new(newCoreDbRef DefaultDbMemory, config) + # Sanity check, to not `panic` in state_transition if com.isLondonOrLater(ctx.env.currentNumber): if ctx.env.currentBaseFee.isSome: diff --git a/tools/t8n/types.nim b/tools/t8n/types.nim index 1ed23477f6..eb8eba6c1d 100644 --- a/tools/t8n/types.nim +++ b/tools/t8n/types.nim @@ -52,6 +52,7 @@ type parentBlobGasUsed*: Opt[uint64] parentExcessBlobGas*: Opt[uint64] parentBeaconBlockRoot*: Opt[Hash32] + depositContractAddress*: Opt[Address] TxObject* = object `type`*: Opt[uint64] @@ -117,6 +118,7 @@ type blobGasUsed*: Opt[uint64] currentExcessBlobGas*: Opt[uint64] requestsHash*: Opt[Hash32] + requests*: Opt[array[3, seq[byte]]] const ErrorEVM* = 2.T8NExitCode diff --git a/vendor/nim-eth b/vendor/nim-eth index 66297c5c0a..dc092ca393 160000 --- a/vendor/nim-eth +++ b/vendor/nim-eth @@ -1 +1 @@ -Subproject commit 66297c5c0a8c22ec3f16c899e902d79aa00df575 +Subproject commit dc092ca39303b030b42aa405e8d5f2f44f21b457 diff --git a/vendor/nim-web3 b/vendor/nim-web3 index 40854fb51f..ff92d28779 160000 --- a/vendor/nim-web3 +++ b/vendor/nim-web3 @@ -1 +1 @@ -Subproject commit 40854fb51fd444b7537da767800fbca2189eb2b7 +Subproject commit ff92d2877985faee5ac6abc3bea805b85f2be1c8 diff --git a/vendor/nimbus-build-system b/vendor/nimbus-build-system index 4afb052662..8fafcd0bac 160000 --- a/vendor/nimbus-build-system +++ b/vendor/nimbus-build-system @@ -1 +1 @@ -Subproject commit 4afb0526629f51aef4c124368365ebe90a782d37 +Subproject commit 8fafcd0bac9f409091b7bcaee62ab6330f57441e