diff --git a/cmd/util/cmd/root.go b/cmd/util/cmd/root.go
index 12e50909d2c..345a9048ef3 100644
--- a/cmd/util/cmd/root.go
+++ b/cmd/util/cmd/root.go
@@ -37,6 +37,7 @@ import (
 	"github.com/onflow/flow-go/cmd/util/cmd/snapshot"
 	system_addresses "github.com/onflow/flow-go/cmd/util/cmd/system-addresses"
 	truncate_database "github.com/onflow/flow-go/cmd/util/cmd/truncate-database"
+	verify_execution_result "github.com/onflow/flow-go/cmd/util/cmd/verify_execution_result"
 	"github.com/onflow/flow-go/cmd/util/cmd/version"
 	"github.com/onflow/flow-go/module/profiler"
 )
@@ -118,6 +119,7 @@ func addCommands() {
 	rootCmd.AddCommand(run_script.Cmd)
 	rootCmd.AddCommand(system_addresses.Cmd)
 	rootCmd.AddCommand(check_storage.Cmd)
+	rootCmd.AddCommand(verify_execution_result.Cmd)
 }
 
 func initConfig() {
diff --git a/cmd/util/cmd/verify_execution_result/cmd.go b/cmd/util/cmd/verify_execution_result/cmd.go
new file mode 100644
index 00000000000..6aa7e135483
--- /dev/null
+++ b/cmd/util/cmd/verify_execution_result/cmd.go
@@ -0,0 +1,98 @@
+package verify
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/rs/zerolog/log"
+	"github.com/spf13/cobra"
+
+	"github.com/onflow/flow-go/engine/verification/verifier"
+	"github.com/onflow/flow-go/model/flow"
+)
+
+var (
+	flagLastK            uint64
+	flagDatadir          string
+	flagChunkDataPackDir string
+	flagChain            string
+	flagFromTo           string
+)
+
+// # verify the last 100 sealed blocks
+// ./util verify-execution-result --chain flow-testnet --datadir /var/flow/data/protocol --chunk_data_pack_dir /var/flow/data/chunk_data_pack --lastk 100
+// # verify the blocks from height 2000 to 3000
+// ./util verify-execution-result --chain flow-testnet --datadir /var/flow/data/protocol --chunk_data_pack_dir /var/flow/data/chunk_data_pack --from_to 2000-3000
+var Cmd = &cobra.Command{
+	Use:   "verify-execution-result",
+	Short: "verify block execution by verifying all chunks in the result",
+	Run:   run,
+}
+
+func init() {
+	Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name")
+	_ = Cmd.MarkFlagRequired("chain")
+
+	Cmd.Flags().StringVar(&flagDatadir, "datadir", "/var/flow/data/protocol",
+		"directory that stores the protocol state")
+
+	Cmd.Flags().StringVar(&flagChunkDataPackDir, "chunk_data_pack_dir", "/var/flow/data/chunk_data_pack",
+		"directory that stores the chunk data packs")
+
+	Cmd.Flags().Uint64Var(&flagLastK, "lastk", 1,
+		"last k sealed blocks to verify")
+
+	Cmd.Flags().StringVar(&flagFromTo, "from_to", "",
+		"the height range to verify blocks (inclusive), e.g. 1-1000, 1000-2000, 2000-3000")
+}
+
+func run(*cobra.Command, []string) {
+	chainID := flow.ChainID(flagChain)
+	_ = chainID.Chain() // sanity check: panics early on an unknown chain ID
+	if flagFromTo != "" {
+		from, to, err := parseFromTo(flagFromTo)
+		if err != nil {
+			log.Fatal().Err(err).Msg("could not parse from_to")
+		}
+
+		log.Info().Msgf("verifying range from %d to %d", from, to)
+		err = verifier.VerifyRange(from, to, chainID, flagDatadir, flagChunkDataPackDir)
+		if err != nil {
+			log.Fatal().Err(err).Msg("could not verify height range")
+		}
+		log.Info().Msgf("successfully verified range from %d to %d", from, to)
+
+	} else {
+		log.Info().Msgf("verifying last %d sealed blocks", flagLastK)
+		err := verifier.VerifyLastKHeight(flagLastK, chainID, flagDatadir, flagChunkDataPackDir)
+		if err != nil {
+			log.Fatal().Err(err).Msg("could not verify last k height")
+		}
+
+		log.Info().Msgf("successfully verified last %d sealed blocks", flagLastK)
+	}
+}
+
+func parseFromTo(fromTo string) (from, to uint64, err error) {
+	parts := strings.Split(fromTo, "-")
+	if len(parts) != 2 {
+		return 0, 
0, fmt.Errorf("invalid format: expected 'from-to', got '%s'", fromTo) + } + + from, err = strconv.ParseUint(strings.TrimSpace(parts[0]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'from' value: %w", err) + } + + to, err = strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'to' value: %w", err) + } + + if from > to { + return 0, 0, fmt.Errorf("'from' value (%d) must be less than or equal to 'to' value (%d)", from, to) + } + + return from, to, nil +} diff --git a/engine/execution/computation/execution_verification_test.go b/engine/execution/computation/execution_verification_test.go index c949b378df4..bcdadd2a0ad 100644 --- a/engine/execution/computation/execution_verification_test.go +++ b/engine/execution/computation/execution_verification_test.go @@ -25,7 +25,6 @@ import ( "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/engine/execution/utils" "github.com/onflow/flow-go/engine/testutil/mocklocal" - "github.com/onflow/flow-go/engine/verification/fetcher" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/fvm/environment" @@ -36,6 +35,7 @@ import ( "github.com/onflow/flow-go/ledger/complete/wal/fixtures" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/model/verification/convert" "github.com/onflow/flow-go/module/chunks" "github.com/onflow/flow-go/module/executiondatasync/execution_data" exedataprovider "github.com/onflow/flow-go/module/executiondatasync/provider" @@ -69,7 +69,7 @@ func Test_ExecutionMatchesVerification(t *testing.T) { `access(all) contract Foo { access(all) event FooEvent(x: Int, y: Int) - access(all) fun emitEvent() { + access(all) fun emitEvent() { emit FooEvent(x: 2, y: 1) } }`), "Foo") @@ -113,7 +113,7 @@ func Test_ExecutionMatchesVerification(t *testing.T) { `access(all) contract Foo { access(all) event FooEvent(x: Int, y: Int) - access(all) fun emitEvent() { + access(all) fun emitEvent() { emit FooEvent(x: 2, y: 1) } }`), "Foo") @@ -585,34 +585,34 @@ func TestTransactionFeeDeduction(t *testing.T) { // // The withdraw amount and the account from getAccount // would be the parameters to the transaction - + import FungibleToken from 0x%s import FlowToken from 0x%s - + transaction(amount: UFix64, to: Address) { - + // The Vault resource that holds the tokens that are being transferred let sentVault: @{FungibleToken.Vault} - + prepare(signer: auth(BorrowValue) &Account) { - + // Get a reference to the signer's stored vault let vaultRef = signer.storage.borrow(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner's Vault!") - + // Withdraw tokens from the signer's stored vault self.sentVault <- vaultRef.withdraw(amount: amount) } - + execute { - + // Get the recipient's public account object let recipient = getAccount(to) - + // Get a reference to the recipient's Receiver let receiverRef = recipient.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? 
panic("Could not borrow receiver reference to the recipient's Vault") - + // Deposit the withdrawn tokens in the recipient's receiver receiverRef.deposit(from: <-self.sentVault) } @@ -840,7 +840,7 @@ func executeBlockAndVerifyWithParameters(t *testing.T, for i, chunk := range er.Chunks { isSystemChunk := i == er.Chunks.Len()-1 - offsetForChunk, err := fetcher.TransactionOffsetForChunk(er.Chunks, chunk.Index) + offsetForChunk, err := convert.TransactionOffsetForChunk(er.Chunks, chunk.Index) require.NoError(t, err) vcds[i] = &verification.VerifiableChunkData{ diff --git a/engine/verification/fetcher/engine.go b/engine/verification/fetcher/engine.go index 20afad04021..551b0571526 100644 --- a/engine/verification/fetcher/engine.go +++ b/engine/verification/fetcher/engine.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/model/verification/convert" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/trace" @@ -259,7 +260,7 @@ func (e *Engine) HandleChunkDataPack(originID flow.Identifier, response *verific Uint64("block_height", status.BlockHeight). Hex("result_id", logging.ID(resultID)). Uint64("chunk_index", status.ChunkIndex). - Bool("system_chunk", IsSystemChunk(status.ChunkIndex, status.ExecutionResult)). + Bool("system_chunk", convert.IsSystemChunk(status.ChunkIndex, status.ExecutionResult)). Logger() span, ctx := e.tracer.StartBlockSpan(context.Background(), status.ExecutionResult.BlockID, trace.VERFetcherHandleChunkDataPack) @@ -413,7 +414,7 @@ func (e Engine) validateCollectionID( result *flow.ExecutionResult, chunk *flow.Chunk) error { - if IsSystemChunk(chunk.Index, result) { + if convert.IsSystemChunk(chunk.Index, result) { return e.validateSystemChunkCollection(chunkDataPack) } @@ -550,29 +551,13 @@ func (e *Engine) makeVerifiableChunkData(chunk *flow.Chunk, chunkDataPack *flow.ChunkDataPack, ) (*verification.VerifiableChunkData, error) { - // system chunk is the last chunk - isSystemChunk := IsSystemChunk(chunk.Index, result) - - endState, err := EndStateCommitment(result, chunk.Index, isSystemChunk) - if err != nil { - return nil, fmt.Errorf("could not compute end state of chunk: %w", err) - } - - transactionOffset, err := TransactionOffsetForChunk(result.Chunks, chunk.Index) - if err != nil { - return nil, fmt.Errorf("cannot compute transaction offset for chunk: %w", err) - } - - return &verification.VerifiableChunkData{ - IsSystemChunk: isSystemChunk, - Chunk: chunk, - Header: header, - Snapshot: snapshot, - Result: result, - ChunkDataPack: chunkDataPack, - EndState: endState, - TransactionOffset: transactionOffset, - }, nil + return convert.FromChunkDataPack( + chunk, + chunkDataPack, + header, + snapshot, + result, + ) } // requestChunkDataPack creates and dispatches a chunk data pack request to the requester engine. @@ -661,42 +646,3 @@ func executorsOf(receipts []*flow.ExecutionReceipt, resultID flow.Identifier) (f return agrees, disagrees } - -// EndStateCommitment computes the end state of the given chunk. 
-func EndStateCommitment(result *flow.ExecutionResult, chunkIndex uint64, systemChunk bool) (flow.StateCommitment, error) { - var endState flow.StateCommitment - if systemChunk { - var err error - // last chunk in a result is the system chunk and takes final state commitment - endState, err = result.FinalStateCommitment() - if err != nil { - return flow.DummyStateCommitment, fmt.Errorf("can not read final state commitment, likely a bug:%w", err) - } - } else { - // any chunk except last takes the subsequent chunk's start state - endState = result.Chunks[chunkIndex+1].StartState - } - - return endState, nil -} - -// TransactionOffsetForChunk calculates transaction offset for a given chunk which is the index of the first -// transaction of this chunk within the whole block -func TransactionOffsetForChunk(chunks flow.ChunkList, chunkIndex uint64) (uint32, error) { - if int(chunkIndex) > len(chunks)-1 { - return 0, fmt.Errorf("chunk list out of bounds, len %d asked for chunk %d", len(chunks), chunkIndex) - } - var offset uint32 = 0 - for i := 0; i < int(chunkIndex); i++ { - offset += uint32(chunks[i].NumberOfTransactions) - } - return offset, nil -} - -// IsSystemChunk returns true if `chunkIndex` points to a system chunk in `result`. -// Otherwise, it returns false. -// In the current version, a chunk is a system chunk if it is the last chunk of the -// execution result. -func IsSystemChunk(chunkIndex uint64, result *flow.ExecutionResult) bool { - return chunkIndex == uint64(len(result.Chunks)-1) -} diff --git a/engine/verification/fetcher/engine_test.go b/engine/verification/fetcher/engine_test.go index b2fb94a94cb..273a76ac73f 100644 --- a/engine/verification/fetcher/engine_test.go +++ b/engine/verification/fetcher/engine_test.go @@ -18,6 +18,7 @@ import ( "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/model/verification/convert" mempool "github.com/onflow/flow-go/module/mempool/mock" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" @@ -757,10 +758,10 @@ func mockVerifierEngine(t *testing.T, require.Equal(t, expected.Result.ID(), vc.Result.ID()) require.Equal(t, expected.Header.ID(), vc.Header.ID()) - isSystemChunk := fetcher.IsSystemChunk(vc.Chunk.Index, vc.Result) + isSystemChunk := convert.IsSystemChunk(vc.Chunk.Index, vc.Result) require.Equal(t, isSystemChunk, vc.IsSystemChunk) - endState, err := fetcher.EndStateCommitment(vc.Result, vc.Chunk.Index, isSystemChunk) + endState, err := convert.EndStateCommitment(vc.Result, vc.Chunk.Index, isSystemChunk) require.NoError(t, err) require.Equal(t, endState, vc.EndState) @@ -872,7 +873,7 @@ func chunkDataPackResponseFixture(t *testing.T, collection *flow.Collection, result *flow.ExecutionResult) *verification.ChunkDataPackResponse { - require.Equal(t, collection != nil, !fetcher.IsSystemChunk(chunk.Index, result), "only non-system chunks must have a collection") + require.Equal(t, collection != nil, !convert.IsSystemChunk(chunk.Index, result), "only non-system chunks must have a collection") return &verification.ChunkDataPackResponse{ Locator: chunks.Locator{ @@ -917,7 +918,7 @@ func verifiableChunkFixture(t *testing.T, result *flow.ExecutionResult, chunkDataPack *flow.ChunkDataPack) *verification.VerifiableChunkData { - offsetForChunk, err := fetcher.TransactionOffsetForChunk(result.Chunks, chunk.Index) + offsetForChunk, err := convert.TransactionOffsetForChunk(result.Chunks, chunk.Index) 
require.NoError(t, err) // TODO: add end state @@ -1000,7 +1001,7 @@ func completeChunkStatusListFixture(t *testing.T, chunkCount int, statusCount in locators := unittest.ChunkStatusListToChunkLocatorFixture(statuses) for _, status := range statuses { - if fetcher.IsSystemChunk(status.ChunkIndex, result) { + if convert.IsSystemChunk(status.ChunkIndex, result) { // system-chunk should have a nil collection continue } @@ -1012,7 +1013,7 @@ func completeChunkStatusListFixture(t *testing.T, chunkCount int, statusCount in func TestTransactionOffsetForChunk(t *testing.T) { t.Run("first chunk index always returns zero offset", func(t *testing.T) { - offsetForChunk, err := fetcher.TransactionOffsetForChunk([]*flow.Chunk{nil}, 0) + offsetForChunk, err := convert.TransactionOffsetForChunk([]*flow.Chunk{nil}, 0) require.NoError(t, err) assert.Equal(t, uint32(0), offsetForChunk) }) @@ -1042,19 +1043,19 @@ func TestTransactionOffsetForChunk(t *testing.T) { }, } - offsetForChunk, err := fetcher.TransactionOffsetForChunk(chunksList, 0) + offsetForChunk, err := convert.TransactionOffsetForChunk(chunksList, 0) require.NoError(t, err) assert.Equal(t, uint32(0), offsetForChunk) - offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 1) + offsetForChunk, err = convert.TransactionOffsetForChunk(chunksList, 1) require.NoError(t, err) assert.Equal(t, uint32(1), offsetForChunk) - offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 2) + offsetForChunk, err = convert.TransactionOffsetForChunk(chunksList, 2) require.NoError(t, err) assert.Equal(t, uint32(3), offsetForChunk) - offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 3) + offsetForChunk, err = convert.TransactionOffsetForChunk(chunksList, 3) require.NoError(t, err) assert.Equal(t, uint32(6), offsetForChunk) }) @@ -1063,7 +1064,7 @@ func TestTransactionOffsetForChunk(t *testing.T) { chunksList := make([]*flow.Chunk, 2) - _, err := fetcher.TransactionOffsetForChunk(chunksList, 2) + _, err := convert.TransactionOffsetForChunk(chunksList, 2) require.Error(t, err) }) } diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go new file mode 100644 index 00000000000..1bc1d11652a --- /dev/null +++ b/engine/verification/verifier/verifiers.go @@ -0,0 +1,204 @@ +package verifier + +import ( + "fmt" + + "github.com/dgraph-io/badger/v2" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/verification/convert" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/chunks" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" + storagepebble "github.com/onflow/flow-go/storage/pebble" +) + +// VerifyLastKHeight verifies the last k sealed blocks by verifying all chunks in the results. +// It assumes the latest sealed block has been executed, and the chunk data packs have not been +// pruned. 
+func VerifyLastKHeight(k uint64, chainID flow.ChainID, protocolDataDir string, chunkDataPackDir string) error { + db, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) + if err != nil { + return fmt.Errorf("could not init storages: %w", err) + } + defer db.Close() + + lastSealed, err := state.Sealed().Head() + if err != nil { + return fmt.Errorf("could not get last sealed height: %w", err) + } + + root := state.Params().SealedRoot().Height + from := lastSealed.Height - k + 1 + + // root block is not verifiable, because it's sealed already. + // the first verifiable is the next block of the root block + firstVerifiable := root + 1 + + if from < firstVerifiable { + from = firstVerifiable + } + to := lastSealed.Height + + for height := from; height <= to; height++ { + log.Info().Uint64("height", height).Msg("verifying height") + err := verifyHeight(height, storages.Headers, chunkDataPacks, storages.Results, state, verifier) + if err != nil { + return fmt.Errorf("could not verify height %d: %w", height, err) + } + } + + return nil +} + +// VerifyRange verifies all chunks in the results of the blocks in the given range. +func VerifyRange( + from, to uint64, + chainID flow.ChainID, + protocolDataDir string, chunkDataPackDir string, +) error { + db, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) + if err != nil { + return fmt.Errorf("could not init storages: %w", err) + } + defer db.Close() + + for height := from; height <= to; height++ { + log.Info().Uint64("height", height).Msg("verifying height") + err := verifyHeight(height, storages.Headers, chunkDataPacks, storages.Results, state, verifier) + if err != nil { + return fmt.Errorf("could not verify height %d: %w", height, err) + } + } + + return nil +} + +func initStorages(chainID flow.ChainID, dataDir string, chunkDataPackDir string) ( + *badger.DB, + *storage.All, + storage.ChunkDataPacks, + protocol.State, + module.ChunkVerifier, + error, +) { + db := common.InitStorage(dataDir) + + storages := common.InitStorages(db) + state, err := common.InitProtocolState(db, storages) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("could not init protocol state: %w", err) + } + + chunkDataPackDB, err := storagepebble.OpenDefaultPebbleDB(chunkDataPackDir) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("could not open chunk data pack DB: %w", err) + } + chunkDataPacks := storagepebble.NewChunkDataPacks(metrics.NewNoopCollector(), + chunkDataPackDB, storages.Collections, 1000) + + verifier := makeVerifier(log.Logger, chainID, storages.Headers) + return db, storages, chunkDataPacks, state, verifier, nil +} + +func verifyHeight( + height uint64, + headers storage.Headers, + chunkDataPacks storage.ChunkDataPacks, + results storage.ExecutionResults, + state protocol.State, + verifier module.ChunkVerifier, +) error { + header, err := headers.ByHeight(height) + if err != nil { + return fmt.Errorf("could not get block header by height %d: %w", height, err) + } + + blockID := header.ID() + + if err != nil { + return fmt.Errorf("could not get block ID by height %d: %w", height, err) + } + + result, err := results.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("could not get execution result by block ID %s: %w", blockID, err) + } + snapshot := state.AtBlockID(blockID) + + for i, chunk := range result.Chunks { + chunkDataPack, err := chunkDataPacks.ByChunkID(chunk.ID()) + if err != nil { + return 
fmt.Errorf("could not get chunk data pack by chunk ID %s: %w", chunk.ID(), err) + } + + vcd, err := convert.FromChunkDataPack(chunk, chunkDataPack, header, snapshot, result) + if err != nil { + return err + } + + _, err = verifier.Verify(vcd) + if err != nil { + return fmt.Errorf("could not verify %d-th chunk: %w", i, err) + } + } + return nil +} + +func makeVerifier( + logger zerolog.Logger, + chainID flow.ChainID, + headers storage.Headers, +) module.ChunkVerifier { + + vm := fvm.NewVirtualMachine() + fvmOptions := initFvmOptions(chainID, headers) + fvmOptions = append( + []fvm.Option{fvm.WithLogger(logger)}, + fvmOptions..., + ) + + // TODO(JanezP): cleanup creation of fvm context github.com/onflow/flow-go/issues/5249 + fvmOptions = append(fvmOptions, computation.DefaultFVMOptions(chainID, false, false)...) + vmCtx := fvm.NewContext(fvmOptions...) + + chunkVerifier := chunks.NewChunkVerifier(vm, vmCtx, logger) + return chunkVerifier +} + +func initFvmOptions(chainID flow.ChainID, headers storage.Headers) []fvm.Option { + blockFinder := environment.NewBlockFinder(headers) + vmOpts := []fvm.Option{ + fvm.WithChain(chainID.Chain()), + fvm.WithBlocks(blockFinder), + fvm.WithAccountStorageLimit(true), + } + switch chainID { + case flow.Testnet, + flow.Sandboxnet, + flow.Previewnet, + flow.Mainnet: + vmOpts = append(vmOpts, + fvm.WithTransactionFeesEnabled(true), + ) + } + switch chainID { + case flow.Testnet, + flow.Sandboxnet, + flow.Previewnet, + flow.Localnet, + flow.Benchnet: + vmOpts = append(vmOpts, + fvm.WithContractDeploymentRestricted(false), + ) + } + return vmOpts +} diff --git a/model/verification/convert/convert.go b/model/verification/convert/convert.go new file mode 100644 index 00000000000..4e62e4d446c --- /dev/null +++ b/model/verification/convert/convert.go @@ -0,0 +1,81 @@ +package convert + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/state/protocol" +) + +func FromChunkDataPack( + chunk *flow.Chunk, + chunkDataPack *flow.ChunkDataPack, + header *flow.Header, + snapshot protocol.Snapshot, + result *flow.ExecutionResult, +) (*verification.VerifiableChunkData, error) { + + // system chunk is the last chunk + isSystemChunk := IsSystemChunk(chunk.Index, result) + + endState, err := EndStateCommitment(result, chunk.Index, isSystemChunk) + if err != nil { + return nil, fmt.Errorf("could not compute end state of chunk: %w", err) + } + + transactionOffset, err := TransactionOffsetForChunk(result.Chunks, chunk.Index) + if err != nil { + return nil, fmt.Errorf("cannot compute transaction offset for chunk: %w", err) + } + + return &verification.VerifiableChunkData{ + IsSystemChunk: isSystemChunk, + Chunk: chunk, + Header: header, + Snapshot: snapshot, + Result: result, + ChunkDataPack: chunkDataPack, + EndState: endState, + TransactionOffset: transactionOffset, + }, nil +} + +// EndStateCommitment computes the end state of the given chunk. 
+func EndStateCommitment(result *flow.ExecutionResult, chunkIndex uint64, systemChunk bool) (flow.StateCommitment, error) {
+	var endState flow.StateCommitment
+	if systemChunk {
+		var err error
+		// last chunk in a result is the system chunk and takes final state commitment
+		endState, err = result.FinalStateCommitment()
+		if err != nil {
+			return flow.DummyStateCommitment, fmt.Errorf("cannot read final state commitment, likely a bug: %w", err)
+		}
+	} else {
+		// any chunk except the last takes the subsequent chunk's start state
+		endState = result.Chunks[chunkIndex+1].StartState
+	}
+
+	return endState, nil
+}
+
+// TransactionOffsetForChunk calculates the transaction offset for a given chunk, which is the index of the first
+// transaction of this chunk within the whole block.
+func TransactionOffsetForChunk(chunks flow.ChunkList, chunkIndex uint64) (uint32, error) {
+	if int(chunkIndex) > len(chunks)-1 {
+		return 0, fmt.Errorf("chunk list out of bounds, len %d asked for chunk %d", len(chunks), chunkIndex)
+	}
+	var offset uint32 = 0
+	for i := 0; i < int(chunkIndex); i++ {
+		offset += uint32(chunks[i].NumberOfTransactions)
+	}
+	return offset, nil
+}
+
+// IsSystemChunk returns true if `chunkIndex` points to a system chunk in `result`.
+// Otherwise, it returns false.
+// In the current version, a chunk is a system chunk if it is the last chunk of the
+// execution result.
+func IsSystemChunk(chunkIndex uint64, result *flow.ExecutionResult) bool {
+	return chunkIndex == uint64(len(result.Chunks)-1)
+}
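
Usage note: the verifier entry points added above can also be called directly from Go rather than through the util CLI. The following is a minimal sketch based on the signatures introduced in engine/verification/verifier/verifiers.go; the chain and directory paths are illustrative and assume a locally available protocol database and chunk data pack directory (the same data the CLI flags point at).

package main

import (
	"github.com/rs/zerolog/log"

	"github.com/onflow/flow-go/engine/verification/verifier"
	"github.com/onflow/flow-go/model/flow"
)

func main() {
	const (
		protocolDir      = "/var/flow/data/protocol"        // illustrative path
		chunkDataPackDir = "/var/flow/data/chunk_data_pack" // illustrative path
	)

	// Equivalent to `--lastk 10`: verify all chunks of the last 10 sealed blocks.
	if err := verifier.VerifyLastKHeight(10, flow.Testnet, protocolDir, chunkDataPackDir); err != nil {
		log.Fatal().Err(err).Msg("could not verify last k sealed blocks")
	}

	// Equivalent to `--from_to 2000-3000`: verify every sealed height in the range.
	if err := verifier.VerifyRange(2000, 3000, flow.Testnet, protocolDir, chunkDataPackDir); err != nil {
		log.Fatal().Err(err).Msg("could not verify height range")
	}
}

Both calls wrap any failure with the first height whose chunks could not be verified, mirroring the behaviour of the CLI command.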