diff --git a/abci/example/kvstore/config.go b/abci/example/kvstore/config.go
index 4edebf81a..e06495435 100644
--- a/abci/example/kvstore/config.go
+++ b/abci/example/kvstore/config.go
@@ -70,6 +70,8 @@ type Config struct {
     ChainLockUpdates map[string]string `toml:"chainlock_updates"`
     PrivValServerType string `toml:"privval_server_type"`
     InitAppInitialCoreHeight uint32 `toml:"init_app_core_chain_locked_height"`
+    // ConsensusVersionUpdates is a map of heights to consensus versions; ONLY SUPPORTED BY e2e.Application
+    ConsensusVersionUpdates map[string]int32 `toml:"consensus_version_updates"`
 }
 
 func DefaultConfig(dir string) Config {
diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go
index 29121647f..b9abd895a 100644
--- a/test/e2e/app/app.go
+++ b/test/e2e/app/app.go
@@ -62,6 +62,20 @@ func NewApplication(cfg kvstore.Config, opts ...kvstore.OptFunc) (*Application,
         return nil, err
     }
 
+    for h, ver := range cfg.ConsensusVersionUpdates {
+        height, err := strconv.Atoi(h)
+        if err != nil {
+            return nil, fmt.Errorf("consensus_version_updates: failed to parse height %s: %w", h, err)
+        }
+        params := types1.ConsensusParams{
+            Version: &types1.VersionParams{
+                ConsensusVersion: types1.VersionParams_ConsensusVersion(ver),
+                AppVersion: kvstore.ProtocolVersion,
+            },
+        }
+        app.AddConsensusParamsUpdate(params, int64(height))
+    }
+
     return &app, nil
 }
 
@@ -146,7 +160,7 @@ func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.Request
     }
 
     if app.cfg.VoteExtensionDelayMS != 0 {
-        time.Sleep(time.Duration(app.cfg.VoteExtensionDelayMS) * time.Millisecond)
+        time.Sleep(time.Duration(app.cfg.VoteExtensionDelayMS) * time.Millisecond) //#nosec G115
     }
 
     app.logger.Info("verified vote extension value", "req", req, "nums", nums)
diff --git a/test/e2e/networks/rotate.toml b/test/e2e/networks/rotate.toml
index 14190da09..84a6c2d15 100644
--- a/test/e2e/networks/rotate.toml
+++ b/test/e2e/networks/rotate.toml
@@ -80,6 +80,28 @@ validator04 = 100
 validator05 = 100
 validator11 = 100
 
+
+[validator_update.1070]
+validator01 = 100
+validator02 = 100
+validator03 = 100
+validator04 = 100
+validator05 = 100
+
+
+[validator_update.1077]
+validator01 = 100
+validator07 = 100
+validator08 = 100
+validator10 = 100
+validator11 = 100
+
+[consensus_version_updates]
+
+1070 = 1
+1076 = 0
+1079 = 1
+
 [node.seed01]
 mode = "seed"
 perturb = ["restart"]
diff --git a/test/e2e/node/config.go b/test/e2e/node/config.go
index edca4e9cb..918355330 100644
--- a/test/e2e/node/config.go
+++ b/test/e2e/node/config.go
@@ -30,6 +30,7 @@ type Config struct {
     QuorumHashUpdate map[string]string `toml:"quorum_hash_update"`
     ChainLockUpdates map[string]string `toml:"chainlock_updates"`
     PrivValServerType string `toml:"privval_server_type"`
+    ConsensusVersionUpdates map[string]int32 `toml:"consensus_version_updates"`
 }
 
 // App extracts out the application specific configuration parameters
@@ -46,6 +47,7 @@ func (cfg *Config) App() *kvstore.Config {
         QuorumHashUpdate: cfg.QuorumHashUpdate,
         ChainLockUpdates: cfg.ChainLockUpdates,
         PrivValServerType: cfg.PrivValServerType,
+        ConsensusVersionUpdates: cfg.ConsensusVersionUpdates,
     }
 }
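Note on the config plumbing above: `consensus_version_updates` is keyed by decimal height strings, following the same convention as the existing `validator_update` and `chainlock_updates` tables. A minimal, self-contained sketch of that parsing convention (`parseHeightKeyed` is a hypothetical helper for illustration; the application inlines this loop in `NewApplication` above):

```go
package main

import (
	"fmt"
	"strconv"
)

// parseHeightKeyed converts a TOML table keyed by decimal height strings
// (as in consensus_version_updates) into an int64-keyed map.
func parseHeightKeyed(in map[string]int32) (map[int64]int32, error) {
	out := make(map[int64]int32, len(in))
	for h, v := range in {
		height, err := strconv.ParseInt(h, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid height %q: %w", h, err)
		}
		out[height] = v
	}
	return out, nil
}

func main() {
	updates, _ := parseHeightKeyed(map[string]int32{"1070": 1, "1076": 0})
	fmt.Println(updates) // map[1070:1 1076:0]
}
```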
diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go
index a070b2b39..d5a653121 100644
--- a/test/e2e/pkg/manifest.go
+++ b/test/e2e/pkg/manifest.go
@@ -56,6 +56,13 @@ type Manifest struct {
     // not specified are not changed.
     ValidatorUpdates map[string]map[string]int64 `toml:"validator_update"`
 
+    // ConsensusVersionUpdates is a map of heights to consensus versions, and
+    // will be sent by the ABCI application as a consensus params update.
+    // For example, the following sets the consensus version to 1 at height 1000:
+    //
+    // [consensus_version_updates]
+    // 1000 = 1
+    ConsensusVersionUpdates map[string]int32 `toml:"consensus_version_updates"`
     // ChainLockUpdates is a map of heights at which a new chain lock should be proposed
     // The first number is the tendermint height, and the second is the
     //
@@ -64,7 +71,6 @@ type Manifest struct {
     // 1004 = 3451
     // 1020 = 3454
     // 1040 = 3500
-    //
     ChainLockUpdates map[string]int64 `toml:"chainlock_updates"`
 
     // Nodes specifies the network nodes. At least one node must be given.
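As a quick sanity check of the manifest schema, this hedged sketch decodes a manifest fragment into the new field. It assumes the `github.com/BurntSushi/toml` decoder (the runner encodes app configs with a TOML encoder later in this diff; the exact library is an assumption here):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// manifestFragment mirrors only the new Manifest field for this check.
type manifestFragment struct {
	ConsensusVersionUpdates map[string]int32 `toml:"consensus_version_updates"`
}

func main() {
	const doc = `
[consensus_version_updates]
1070 = 1
1076 = 0
1079 = 1
`
	var m manifestFragment
	if _, err := toml.Decode(doc, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.ConsensusVersionUpdates) // map[1070:1 1076:0 1079:1]
}
```

diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go
index 44ca245d2..afa6f7a1f 100644
--- a/test/e2e/pkg/testnet.go
+++ b/test/e2e/pkg/testnet.go
@@ -102,6 +102,8 @@ type Testnet struct {
     QuorumType btcjson.LLMQType
     QuorumHash crypto.QuorumHash
     QuorumHashUpdates map[int64]crypto.QuorumHash
+    // ConsensusVersionUpdates maps height to new consensus version (ConsensusParams.Version.ConsensusVersion)
+    ConsensusVersionUpdates map[int64]int32
 }
 
 // Node represents a Tenderdash node in a testnet.
@@ -204,18 +206,19 @@ func LoadTestnet(file string) (*Testnet, error) {
         LogLevel: manifest.LogLevel,
         TxSize: manifest.TxSize,
         ABCIProtocol: Protocol(manifest.ABCIProtocol),
-        PrepareProposalDelayMS: int(manifest.PrepareProposalDelayMS),
-        ProcessProposalDelayMS: int(manifest.ProcessProposalDelayMS),
-        CheckTxDelayMS: int(manifest.CheckTxDelayMS),
-        VoteExtensionDelayMS: int(manifest.VoteExtensionDelayMS),
-        FinalizeBlockDelayMS: int(manifest.FinalizeBlockDelayMS),
-        MaxBlockSize: int64(manifest.MaxBlockSize),
-        MaxEvidenceSize: int64(manifest.MaxEvidenceSize),
+        PrepareProposalDelayMS: int(manifest.PrepareProposalDelayMS), //#nosec G115
+        ProcessProposalDelayMS: int(manifest.ProcessProposalDelayMS), //#nosec G115
+        CheckTxDelayMS: int(manifest.CheckTxDelayMS), //#nosec G115
+        VoteExtensionDelayMS: int(manifest.VoteExtensionDelayMS), //#nosec G115
+        FinalizeBlockDelayMS: int(manifest.FinalizeBlockDelayMS), //#nosec G115
+        MaxBlockSize: int64(manifest.MaxBlockSize), //#nosec G115
+        MaxEvidenceSize: int64(manifest.MaxEvidenceSize), //#nosec G115
         ThresholdPublicKey: ld.ThresholdPubKey,
         ThresholdPublicKeyUpdates: map[int64]crypto.PubKey{},
         QuorumType: btcjson.LLMQType(quorumType),
         QuorumHash: quorumHash,
         QuorumHashUpdates: map[int64]crypto.QuorumHash{},
+        ConsensusVersionUpdates: map[int64]int32{},
     }
     if len(manifest.KeyType) != 0 {
         testnet.KeyType = manifest.KeyType
@@ -439,7 +442,7 @@ func LoadTestnet(file string) (*Testnet, error) {
 
     sort.Ints(chainLockSetHeights)
 
-    // Set up validator updates.
+    // Set up chainlock updates.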
     for _, height := range chainLockSetHeights {
         heightStr := strconv.FormatInt(int64(height), 10)
         chainLockHeight := manifest.ChainLockUpdates[heightStr]
@@ -447,6 +450,14 @@ func LoadTestnet(file string) (*Testnet, error) {
         fmt.Printf("Set chainlock at height %d / core height is %d\n", height, chainLockHeight)
     }
 
+    for heightStr, cpUpdate := range manifest.ConsensusVersionUpdates {
+        height, err := strconv.Atoi(heightStr)
+        if err != nil {
+            return nil, fmt.Errorf("invalid consensus version update height %q: %w", heightStr, err)
+        }
+        testnet.ConsensusVersionUpdates[int64(height)] = cpUpdate
+    }
+
     return testnet, testnet.Validate()
 }
diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go
index a5395ba8f..b15302408 100644
--- a/test/e2e/runner/setup.go
+++ b/test/e2e/runner/setup.go
@@ -438,6 +438,14 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) {
         cfg["chainlock_updates"] = chainLockUpdates
     }
 
+    if len(node.Testnet.ConsensusVersionUpdates) > 0 {
+        consensusVersionUpdates := map[string]int32{}
+        for height, version := range node.Testnet.ConsensusVersionUpdates {
+            consensusVersionUpdates[strconv.Itoa(int(height))] = version //#nosec G115
+        }
+        cfg["consensus_version_updates"] = consensusVersionUpdates
+    }
+
     var buf bytes.Buffer
     err := toml.NewEncoder(&buf).Encode(cfg)
     if err != nil {
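The `//#nosec G115` markers silence gosec's G115 check (potential integer overflow when converting between integer types); the converted values here are test-controlled delays and sizes read from the manifest. For contrast, a checked conversion that fails loudly instead of suppressing the lint could look like this (a hypothetical alternative, not part of the change):

```go
package main

import (
	"fmt"
	"math"
)

// toInt32 narrows an int64 to int32, returning an error on overflow
// instead of relying on a //#nosec G115 suppression.
func toInt32(v int64) (int32, error) {
	if v > math.MaxInt32 || v < math.MinInt32 {
		return 0, fmt.Errorf("value %d overflows int32", v)
	}
	return int32(v), nil
}

func main() {
	n, err := toInt32(1 << 40)
	fmt.Println(n, err) // 0 value 1099511627776 overflows int32
}
```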
diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go
index 77186b95f..2f63bd01a 100644
--- a/test/e2e/tests/app_test.go
+++ b/test/e2e/tests/app_test.go
@@ -242,68 +242,70 @@ func TestApp_TxTooBig(t *testing.T) {
     outcome := make([]txPair, 0, len(nodes))
     start := time.Now()
 
-    /// Send to each node more txs than we can fit into block
-    for _, node := range nodes {
-        ctx, cancel := context.WithTimeout(mainCtx, broadcastTimeout)
-        defer cancel()
-        if ctx.Err() != nil {
-            t.Fatalf("context canceled before broadcasting to all nodes")
-        }
-        node := *node
+    /// Send more txs than we can fit into block
 
-        if node.Stateless() {
-            continue
+    ctx, cancel := context.WithTimeout(mainCtx, broadcastTimeout)
+    defer cancel()
+
+    if ctx.Err() != nil {
+        t.Fatalf("context canceled before broadcasting to all nodes")
+    }
+    // find first non-stateless node
+    var node *e2e.Node
+    for _, node = range nodes {
+        if !node.Stateless() {
+            break
         }
+    }
 
-        t.Logf("broadcasting to %s", node.Name)
+    t.Logf("broadcasting to %s", node.Name)
 
-        session := rand.Int63()
+    session := rand.Int63()
 
-        var err error
-        client, err = node.Client()
-        require.NoError(t, err)
+    var err error
+    client, err = node.Client()
+    require.NoError(t, err)
 
-        // FIXME: ConsensusParams is broken for last height, this is just workaround
-        status, err := client.Status(ctx)
-        assert.NoError(t, err)
-        cp, err := client.ConsensusParams(ctx, &status.SyncInfo.LatestBlockHeight)
-        assert.NoError(t, err)
+    // FIXME: ConsensusParams is broken for last height, this is just workaround
+    status, err := client.Status(ctx)
+    assert.NoError(t, err)
+    cp, err := client.ConsensusParams(ctx, &status.SyncInfo.LatestBlockHeight)
+    assert.NoError(t, err)
 
-        // ensure we have more txs than fits the block
-        TxPayloadSize := int(cp.ConsensusParams.Block.MaxBytes / 100) // 1% of block size
-        numTxs := 101
+    // ensure we have more txs than fits the block
+    TxPayloadSize := int(cp.ConsensusParams.Block.MaxBytes / 100) // 1% of block size
+    numTxs := 101
 
-        tx := make(types.Tx, TxPayloadSize) // first tx is just zeros
+    tx := make(types.Tx, TxPayloadSize) // first tx is just zeros
 
-        var firstTxHash []byte
-        var key string
+    var firstTxHash []byte
+    var key string
 
-        for i := 0; i < numTxs; i++ {
-            key = fmt.Sprintf("testapp-big-tx-%v-%08x-%d=", node.Name, session, i)
-            copy(tx, key)
+    for i := 0; i < numTxs; i++ {
+        key = fmt.Sprintf("testapp-big-tx-%v-%08x-%d=", node.Name, session, i)
+        copy(tx, key)
 
-            payloadOffset := len(tx) - 8 // where we put the `i` into the payload
-            assert.Greater(t, payloadOffset, len(key))
+        payloadOffset := len(tx) - 8 // where we put the `i` into the payload
+        assert.Greater(t, payloadOffset, len(key))
 
-            big.NewInt(int64(i)).FillBytes(tx[payloadOffset:])
-            assert.Len(t, tx, TxPayloadSize)
+        big.NewInt(int64(i)).FillBytes(tx[payloadOffset:])
+        assert.Len(t, tx, TxPayloadSize)
 
-            if i == 0 {
-                firstTxHash = tx.Hash()
-            }
-
-            _, err = client.BroadcastTxAsync(ctx, tx)
-
-            assert.NoError(t, err, "failed to broadcast tx %06x", i)
+        if i == 0 {
+            firstTxHash = tx.Hash()
         }
 
-        outcome = append(outcome, txPair{
-            firstTxHash: firstTxHash,
-            lastTxHash:  tx.Hash(),
-        })
+        _, err = client.BroadcastTxAsync(ctx, tx)
+
+        assert.NoError(t, err, "failed to broadcast tx %06x", i)
     }
 
+    outcome = append(outcome, txPair{
+        firstTxHash: firstTxHash,
+        lastTxHash:  tx.Hash(),
+    })
+
     t.Logf("submitted txs in %s", time.Since(start).String())
 
     successful := 0
diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go
index c8e72d0b6..f8756a437 100644
--- a/test/e2e/tests/e2e_test.go
+++ b/test/e2e/tests/e2e_test.go
@@ -16,6 +16,10 @@ import (
     "github.com/dashpay/tenderdash/types"
 )
 
+// maxBlocks is the maximum number of blocks to fetch from the archive node.
+// Used to limit test run time.
+const maxBlocks = 500
+
 func init() {
     // This can be used to manually specify a testnet manifest and/or node to
     // run tests against. The testnet must have been started by the runner first.
@@ -122,6 +126,10 @@ func fetchBlockChain(ctx context.Context, t *testing.T) []*types.Block {
     from := status.SyncInfo.EarliestBlockHeight
     to := status.SyncInfo.LatestBlockHeight
+    // limit the number of blocks to fetch, to avoid long test times
+    if to-from > maxBlocks {
+        to = from + maxBlocks
+    }
 
     blocks, ok := blocksCache[testnet.Name]
     if !ok {
         blocks = make([]*types.Block, 0, to-from+1)
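The rewritten `TestApp_TxTooBig` loop builds each transaction as a fixed-size buffer: the ASCII key goes at the front and the loop counter is big-endian encoded into the trailing 8 bytes, so every tx is unique at a constant payload size. A standalone sketch of that layout (the payload size and key below are made up for illustration; the test derives the size as 1% of `Block.MaxBytes`):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	const payloadSize = 64 // hypothetical; see TxPayloadSize in the test
	key := "testapp-big-tx-validator01-0000002a-1="

	tx := make([]byte, payloadSize)
	copy(tx, key) // key prefix; the rest of the buffer stays zero
	// encode the counter into the last 8 bytes, as the test does
	big.NewInt(1).FillBytes(tx[len(tx)-8:])

	fmt.Printf("%q\n", tx[:len(key)])   // the key prefix
	fmt.Printf("% x\n", tx[len(tx)-8:]) // 00 00 00 00 00 00 00 01
}
```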
diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go
index 07e0fda85..837980060 100644
--- a/test/e2e/tests/validator_test.go
+++ b/test/e2e/tests/validator_test.go
@@ -6,12 +6,13 @@ import (
     "testing"
 
     "github.com/dashpay/dashd-go/btcjson"
+    "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 
     "github.com/dashpay/tenderdash/crypto"
     cryptoenc "github.com/dashpay/tenderdash/crypto/encoding"
-    selectproposer "github.com/dashpay/tenderdash/internal/consensus/versioned/selectproposer"
-    "github.com/dashpay/tenderdash/internal/libs/test"
+    "github.com/dashpay/tenderdash/internal/consensus/versioned/selectproposer"
+    tmbytes "github.com/dashpay/tenderdash/libs/bytes"
     e2e "github.com/dashpay/tenderdash/test/e2e/pkg"
     "github.com/dashpay/tenderdash/types"
 )
@@ -35,6 +36,10 @@ func TestValidator_Sets(t *testing.T) {
         }
         last := status.SyncInfo.LatestBlockHeight
 
+        // limit the test to 100 blocks, to avoid long test times
+        if last > first+100 {
+            last = first + 100
+        }
 
         // skip first block if node is pruning blocks, to avoid race conditions
         if node.RetainBlocks > 0 {
@@ -62,7 +67,7 @@ func TestValidator_Sets(t *testing.T) {
             }
             // fmt.Printf("node %s(%X) validator set for height %d is %v\n",
             //	node.Name, node.ProTxHash, h, valSchedule.Set)
-            for i, valScheduleValidator := range valSchedule.Set.ValidatorSet().Validators {
+            for i, valScheduleValidator := range valSchedule.ValidatorProposer.ValidatorSet().Validators {
                 validator := validators[i]
                 require.Equal(t, valScheduleValidator.ProTxHash, validator.ProTxHash,
                     "mismatching validator proTxHashes at height %v (%X <=> %X", h,
@@ -74,9 +79,9 @@ func TestValidator_Sets(t *testing.T) {
                 // Validators in the schedule don't contain addresses
                 validator.NodeAddress = types.ValidatorAddress{}
             }
-            require.Equal(t, valSchedule.Set.ValidatorSet().Validators, validators,
+            require.Equal(t, valSchedule.ValidatorProposer.ValidatorSet().Validators, validators,
                 "incorrect validator set at height %v", h)
-            require.Equal(t, valSchedule.Set.ValidatorSet().ThresholdPublicKey, thresholdPublicKey,
+            require.Equal(t, valSchedule.ValidatorProposer.ValidatorSet().ThresholdPublicKey, thresholdPublicKey,
                 "incorrect thresholdPublicKey at height %v", h)
             require.NoError(t, valSchedule.Increment(1))
         }
@@ -90,6 +95,29 @@ func TestValidator_Propose(t *testing.T) {
     defer cancel()
     blocks := fetchBlockChain(ctx, t)
 
+    // check that the proposer order is correct, that is:
+    // - validators don't propose twice in a row
+    // - validators propose in ascending proTxHash order
+    // - validators propose in round-robin order
+    var prevProposer tmbytes.HexBytes
+    var prevBlock *types.Block
+    for _, block := range blocks {
+        currentProposer := block.ProposerProTxHash
+        require.NotEmpty(t, currentProposer, "block %v has no proposer", block.Height)
+
+        // don't verify heights where validator rotation happens
+        if prevBlock != nil && prevBlock.ValidatorsHash.Equal(block.ValidatorsHash) {
+            assert.NotEqual(t, prevProposer, currentProposer,
+                "validator %s proposed two blocks in a row", currentProposer.ShortString())
+            assert.Less(t, prevProposer, currentProposer,
+                "previous proposer %s is higher than proposer %s at height %d",
+                prevProposer.ShortString(), currentProposer.ShortString(), block.Header.Height)
+        }
+
+        prevProposer = currentProposer
+        prevBlock = block
+    }
+
     testNode(t, func(_ctx context.Context, t *testing.T, node e2e.Node) {
         if node.Mode != e2e.ModeValidator {
             return
@@ -99,16 +127,15 @@ func TestValidator_Propose(t *testing.T) {
         expectCount := 0
         proposeCount := 0
-        var prevBlock *types.Block
-        for _, block := range blocks {
-            if prevBlock == nil {
-                prevBlock = block
-                continue
+        for i, block := range blocks {
+            round := int32(0)
+            if i+1 < len(blocks) { // we might be missing the last commit, so assume round 0
+                round = blocks[i+1].LastCommit.Round
             }
-
-            if bytes.Equal(valSchedule.Set.MustGetProposer(prevBlock.Height, block.LastCommit.Round).ProTxHash, proTxHash) {
+            expectedProposer := valSchedule.ValidatorProposer.MustGetProposer(block.Height, round).ProTxHash
+            if bytes.Equal(expectedProposer, proTxHash) {
                 expectCount++
-                if bytes.Equal(prevBlock.ProposerProTxHash, proTxHash) {
+                if bytes.Equal(block.ProposerProTxHash, proTxHash) {
                     proposeCount++
                 }
             }
@@ -129,11 +156,14 @@ func TestValidator_Propose(t *testing.T) {
 
 // validatorSchedule is a validator set iterator, which takes into account
 // validator set updates.
 type validatorSchedule struct {
-    Set selectproposer.ProposerSelector
+    ValidatorProposer selectproposer.ProposerSelector
     height int64
     updates map[int64]e2e.ValidatorsMap
     thresholdPublicKeyUpdates map[int64]crypto.PubKey
     quorumHashUpdates map[int64]crypto.QuorumHash
+    consensusVersionUpdates map[int64]int32
+
+    consensusVersions map[int64]int32
 }
 
 func newValidatorSchedule(testnet e2e.Testnet) *validatorSchedule {
@@ -155,43 +185,95 @@ func newValidatorSchedule(testnet e2e.Testnet) *validatorSchedule {
             panic("quorum hash key must be set for height 0 if validator changes")
         }
     }
-    valset := types.NewValidatorSet(makeVals(valMap), thresholdPublicKey, quorumType, quorumHash, true)
-    vs := test.Must(selectproposer.NewProposerSelector(types.ConsensusParams{}, valset,
-        testnet.InitialHeight, 0, nil, nil))
+
+    vs := types.NewValidatorSet(makeVals(valMap), thresholdPublicKey, quorumType, quorumHash, true)
+    ps, err := selectproposer.NewProposerSelector(*types.DefaultConsensusParams(), vs, testnet.InitialHeight, 0, nil, nil)
+    if err != nil {
+        panic(err)
+    }
+
     return &validatorSchedule{
         height: testnet.InitialHeight,
-        Set: vs,
+        ValidatorProposer: ps,
         updates: testnet.ValidatorUpdates,
         thresholdPublicKeyUpdates: testnet.ThresholdPublicKeyUpdates,
         quorumHashUpdates: testnet.QuorumHashUpdates,
+        consensusVersions: make(map[int64]int32),
+        consensusVersionUpdates: testnet.ConsensusVersionUpdates,
     }
 }
 
+func (s *validatorSchedule) consensusVersionUpdate() int32 {
+    var version int32
+    ok := false
+
+    // find the most recent consensus version update at or below the current height
+    for h := s.height; h > 0 && !ok; h-- {
+        if version, ok = s.consensusVersions[h]; !ok {
+            var updatedVersion int32
+            if updatedVersion, ok = s.consensusVersionUpdates[h]; ok {
+                version = updatedVersion
+                s.consensusVersions[h] = version
+            }
+        }
+    }
+
+    // cache the result for the current height
+    s.consensusVersions[s.height] = version
+
+    return version
+}
+
+func (s *validatorSchedule) ConsensusParams() types.ConsensusParams {
+    ver := s.consensusVersionUpdate()
+
+    cp := *types.DefaultConsensusParams()
+    cp.Version.ConsensusVersion = ver
+
+    return cp
+}
+
 func (s *validatorSchedule) Increment(heights int64) error {
     for i := int64(0); i < heights; i++ {
         s.height++
+
+        // consensus params update - for now, we only support consensus version updates
+        s.consensusVersionUpdate()
+        cp := s.ConsensusParams()
+
+        // validator set update
         if s.height > 1 {
             // validator set updates are offset by 1, since they only take effect
             // 1 block after they're returned.
             if update, ok := s.updates[s.height-1]; ok {
                 if thresholdPublicKeyUpdate, ok := s.thresholdPublicKeyUpdates[s.height-1]; ok {
                     if quorumHashUpdate, ok := s.quorumHashUpdates[s.height-1]; ok {
-                        if bytes.Equal(quorumHashUpdate, s.Set.ValidatorSet().QuorumHash) {
-                            if err := s.Set.ValidatorSet().UpdateWithChangeSet(makeVals(update), thresholdPublicKeyUpdate, quorumHashUpdate); err != nil {
+                        currentQuorumHash := s.ValidatorProposer.ValidatorSet().QuorumHash
+                        if bytes.Equal(quorumHashUpdate, currentQuorumHash) {
+                            vs := s.ValidatorProposer.ValidatorSet()
+
+                            if err := vs.UpdateWithChangeSet(makeVals(update), thresholdPublicKeyUpdate, quorumHashUpdate); err != nil {
                                 return err
                             }
                         } else {
-
-                            vset := types.NewValidatorSet(makeVals(update), thresholdPublicKeyUpdate, btcjson.LLMQType_5_60,
+                            vs := types.NewValidatorSet(makeVals(update), thresholdPublicKeyUpdate, btcjson.LLMQType_5_60,
                                 quorumHashUpdate, true)
-                            s.Set = test.Must(selectproposer.NewProposerSelector(types.ConsensusParams{}, vset,
-                                s.height, 0, nil, nil))
+
+                            ps, err := selectproposer.NewProposerSelector(cp, vs, s.height, 0, nil, nil)
+                            if err != nil {
+                                return err
+                            }
+                            if cp.Version.ConsensusVersion == 0 {
+                                // consensus version 0 had an issue where the first proposer didn't propose
+                                ps.ValidatorSet().IncProposerIndex(1)
+                            }
+                            s.ValidatorProposer = ps
                         }
                     }
                 }
             }
         }
-        if err := s.Set.UpdateHeightRound(s.height, 0); err != nil {
+        if err := s.ValidatorProposer.UpdateHeightRound(s.height, 0); err != nil {
             return err
         }
     }
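`consensusVersionUpdate` above scans backward from the current height to find the most recent version update and memoizes the result per height, so repeated `Increment` calls stay cheap. A hedged, standalone sketch of the same lookup (the names are illustrative, not the test's API):

```go
package main

import "fmt"

// versionAt returns the consensus version in effect at height h, given
// sparse height->version updates, caching resolved heights as it goes.
func versionAt(h int64, updates, cache map[int64]int32) int32 {
	var version int32
	ok := false
	for x := h; x > 0 && !ok; x-- {
		if version, ok = cache[x]; !ok {
			if version, ok = updates[x]; ok {
				cache[x] = version
			}
		}
	}
	cache[h] = version
	return version
}

func main() {
	updates := map[int64]int32{1070: 1, 1076: 0, 1079: 1} // as in rotate.toml
	cache := map[int64]int32{}
	for _, h := range []int64{1069, 1070, 1075, 1076, 1080} {
		fmt.Println(h, versionAt(h, updates, cache))
	}
	// prints: 1069 0, 1070 1, 1075 1, 1076 0, 1080 1
}
```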