diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index 549aadc15c..e49992ef85 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -110,6 +110,7 @@ func defaultNodeConfig() node.Config { cfg.Name = "" cfg.Version = params.VersionWithCommit("", "") cfg.HTTPModules = append(cfg.HTTPModules, "quai") + cfg.HTTPModules = append(cfg.HTTPModules, "net") cfg.WSModules = append(cfg.WSModules, "quai") return cfg } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 19eee21aef..13194fc6f9 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -113,6 +113,8 @@ var NodeFlags = []Flag{ QuaiStatsURLFlag, SendFullStatsFlag, IndexAddressUtxos, + ReIndex, + ValidateIndexer, StartingExpansionNumberFlag, NodeLogLevelFlag, GenesisNonce, @@ -544,6 +546,18 @@ var ( Usage: "Index address utxos" + generateEnvDoc(c_NodeFlagPrefix+"index-address-utxos"), } + ReIndex = Flag{ + Name: c_NodeFlagPrefix + "reindex", + Value: false, + Usage: "Re-index the UTXO indexer. This will take a long time!" + generateEnvDoc(c_NodeFlagPrefix+"reindex"), + } + + ValidateIndexer = Flag{ + Name: c_NodeFlagPrefix + "validate-indexer", + Value: false, + Usage: "Validate the UTXO indexer. This will take a long time!" 
+ generateEnvDoc(c_NodeFlagPrefix+"validate-indexer"), + } + EnvironmentFlag = Flag{ Name: c_NodeFlagPrefix + "environment", Value: params.ColosseumName, diff --git a/cmd/utils/hierarchical_coordinator.go b/cmd/utils/hierarchical_coordinator.go index a0b43ff5f8..6246d29479 100644 --- a/cmd/utils/hierarchical_coordinator.go +++ b/cmd/utils/hierarchical_coordinator.go @@ -1,9 +1,11 @@ package utils import ( + "encoding/binary" "errors" "fmt" "math/big" + "path/filepath" "runtime/debug" "sort" "sync" @@ -11,6 +13,7 @@ import ( "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/core" + "github.com/dominant-strategies/go-quai/core/rawdb" "github.com/dominant-strategies/go-quai/core/types" "github.com/dominant-strategies/go-quai/event" "github.com/dominant-strategies/go-quai/internal/quaiapi" @@ -260,6 +263,12 @@ func NewHierarchicalCoordinator(p2p quai.NetworkingAPI, logLevel string, nodeWg if err != nil { log.Global.WithField("err", err).Fatal("Error opening the backend db") } + if viper.GetBool(ReIndex.Name) { + ReIndexChainIndexer() + } + if viper.GetBool(ValidateIndexer.Name) { + ValidateChainIndexer() + } hc := &HierarchicalCoordinator{ wg: nodeWg, db: db, @@ -1321,3 +1330,233 @@ func (hc *HierarchicalCoordinator) GetBackendForLocationAndOrder(location common
for reindexing") + } + core.ReIndexChainIndexer(zoneDb) + if err := zoneDb.Close(); err != nil { + log.Global.WithField("err", err).Fatal("Error closing the zone db") + } + time.Sleep(10 * time.Second) +} + +func ValidateChainIndexer() { + providedDataDir := viper.GetString(DataDirFlag.Name) + if providedDataDir == "" { + log.Global.Fatal("Data directory not provided for validation") + } + dbDir := filepath.Join(filepath.Join(providedDataDir, "zone-0-0/go-quai"), "chaindata") + ancientDir := filepath.Join(dbDir, "ancient") + zoneDb, err := rawdb.Open(rawdb.OpenOptions{ + Type: "leveldb", + Directory: dbDir, + AncientsDirectory: ancientDir, + Namespace: "eth/db/chaindata/", + Cache: 512, + Handles: 5120, + ReadOnly: false, + }, common.ZONE_CTX, log.Global, common.Location{0, 0}) + if err != nil { + log.Global.WithField("err", err).Fatal("Error opening the zone db for validation") + } + start := time.Now() + head := rawdb.ReadHeadBlockHash(zoneDb) + if head == (common.Hash{}) { + log.Global.Fatal("Head block hash not found") + } + headNum := rawdb.ReadHeaderNumber(zoneDb, head) + latestSetSize := rawdb.ReadUTXOSetSize(zoneDb, head) + log.Global.Infof("Starting the UTXO indexer validation for height %d set size %d", *headNum, latestSetSize) + i := 0 + utxosChecked := make(map[[34]byte]uint8) + it := zoneDb.NewIterator(rawdb.UtxoPrefix, nil) + for it.Next() { + if len(it.Key()) != rawdb.UtxoKeyLength { + continue + } + data := it.Value() + if len(data) == 0 { + log.Global.Infof("Empty key found") + continue + } + utxoProto := new(types.ProtoTxOut) + if err := proto.Unmarshal(data, utxoProto); err != nil { + log.Global.Errorf("Failed to unmarshal ProtoTxOut: %+v data: %+v key: %+v", err, data, it.Key()) + continue + } + + utxo := new(types.UtxoEntry) + if err := utxo.ProtoDecode(utxoProto); err != nil { + log.Global.WithFields(log.Fields{ + "key": it.Key(), + "data": data, + "err": err, + }).Error("Invalid utxo Proto") + continue + } + txHash, index, err :=
rawdb.ReverseUtxoKey(it.Key()) + if err != nil { + log.Global.WithField("err", err).Error("Failed to parse utxo key") + continue + } + u16 := make([]byte, 2) + binary.BigEndian.PutUint16(u16, index) + key := [34]byte(append(txHash.Bytes(), u16...)) + if _, exists := utxosChecked[key]; exists { + log.Global.WithField("hash", key).Error("Duplicate utxo found") + continue + } + height := rawdb.ReadUtxoToBlockHeight(zoneDb, txHash, index) + addr20 := common.BytesToAddress(utxo.Address, common.Location{0, 0}).Bytes20() + binary.BigEndian.PutUint32(addr20[16:], height) + outpoints, err := rawdb.ReadOutpointsForAddressAtBlock(zoneDb, addr20) + if err != nil { + log.Global.WithField("err", err).Error("Error reading outpoints for address") + continue + } + found := false + for _, outpoint := range outpoints { + if outpoint.TxHash == txHash && outpoint.Index == index { + utxosChecked[key] = outpoint.Denomination + found = true + } + } + if !found { + log.Global.WithFields(log.Fields{ + "tx": txHash, + "index": index, + }).Error("Utxo not found in outpoints") + prefix := append(rawdb.AddressUtxosPrefix, addr20.Bytes()[:16]...) 
+ it2 := zoneDb.NewIterator(prefix, nil) + for it2.Next() { + if len(it2.Key()) != len(rawdb.AddressUtxosPrefix)+common.AddressLength { + continue + } + addressOutpointsProto := &types.ProtoAddressOutPoints{ + OutPoints: make([]*types.ProtoOutPointAndDenomination, 0), + } + if err := proto.Unmarshal(it2.Value(), addressOutpointsProto); err != nil { + log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal addressOutpointsProto") + continue + } + for _, outpointProto := range addressOutpointsProto.OutPoints { + outpoint := new(types.OutpointAndDenomination) + if err := outpoint.ProtoDecode(outpointProto); err != nil { + log.Global.WithFields(log.Fields{ + "err": err, + "outpoint": outpointProto, + }).Error("Invalid outpointProto") + continue + } + if outpoint.TxHash == txHash && outpoint.Index == index { + log.Global.WithFields(log.Fields{ + "tx": txHash, + "index": index, + }).Error("Utxo found in address outpoints") + utxosChecked[key] = outpoint.Denomination + found = true + } + } + } + it2.Release() + } + i++ + if i%100000 == 0 { + log.Global.Infof("Checked %d utxos out of %d total elapsed %s", i, latestSetSize, common.PrettyDuration(time.Since(start))) + } + } + it.Release() + log.Global.Infof("Checked %d utxos and %d are good, elapsed %s", i, len(utxosChecked), common.PrettyDuration(time.Since(start))) + if len(utxosChecked) != int(latestSetSize) { + log.Global.WithFields(log.Fields{ + "expected": latestSetSize, + "actual": len(utxosChecked), + }).Error("Mismatch in utxo set size") + } + log.Global.Infof("Checking for duplicates in Address Outpoints Index...") + utxosChecked_ := make(map[[34]byte]uint8) + duplicatesFound := false + it = zoneDb.NewIterator(rawdb.AddressUtxosPrefix, nil) + for it.Next() { + if len(it.Key()) != len(rawdb.AddressUtxosPrefix)+common.AddressLength { + continue + } + addressOutpointsProto := &types.ProtoAddressOutPoints{ + OutPoints: make([]*types.ProtoOutPointAndDenomination, 0), + } + if err := proto.Unmarshal(it.Value(),
addressOutpointsProto); err != nil { + log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal addressOutpointsProto") + continue + } + for _, outpointProto := range addressOutpointsProto.OutPoints { + outpoint := new(types.OutpointAndDenomination) + if err := outpoint.ProtoDecode(outpointProto); err != nil { + log.Global.WithFields(log.Fields{ + "err": err, + "outpoint": outpointProto, + }).Error("Invalid outpointProto") + continue + } + u16 := make([]byte, 2) + binary.BigEndian.PutUint16(u16, outpoint.Index) + key := [34]byte(append(outpoint.TxHash.Bytes(), u16...)) + if _, exists := utxosChecked_[key]; exists { + log.Global.WithFields(log.Fields{ + "tx": outpoint.TxHash.String(), + "index": outpoint.Index, + }).Error("Duplicate outpoint found") + duplicatesFound = true + continue + } + utxosChecked_[key] = outpoint.Denomination + } + } + it.Release() + if len(utxosChecked_) != int(latestSetSize) { + log.Global.WithFields(log.Fields{ + "expected": latestSetSize, + "actual": len(utxosChecked_), + }).Error("Mismatch in utxo set size") + time.Sleep(5 * time.Second) + if len(utxosChecked_) > len(utxosChecked) { + log.Global.Infof("Finding diff...") + for key, val := range utxosChecked_ { + if _, exists := utxosChecked[key]; !exists { + txhash := key[:32] + index := binary.BigEndian.Uint16(key[32:]) + log.Global.WithFields(log.Fields{ + "tx": common.BytesToHash(txhash).String(), + "index": index, + "denomination": val, + }).Error("Missing key") + } + } + } + } + if duplicatesFound { + log.Global.Error("Duplicates found in address outpoints") + } else { + log.Global.Info("No duplicates found in address-outpoints index. 
Validation completed") + } + if err := zoneDb.Close(); err != nil { + log.Global.WithField("err", err).Fatal("Error closing the zone db") + } + time.Sleep(30 * time.Second) +} diff --git a/core/chain_indexer.go b/core/chain_indexer.go index d9e2b2fe17..b5a4ca1012 100644 --- a/core/chain_indexer.go +++ b/core/chain_indexer.go @@ -36,7 +36,6 @@ import ( "github.com/dominant-strategies/go-quai/event" "github.com/dominant-strategies/go-quai/log" "github.com/dominant-strategies/go-quai/params" - "google.golang.org/protobuf/proto" ) var PruneDepth = uint64(100000000) // Number of blocks behind in which we begin pruning old block data @@ -103,8 +102,8 @@ type ChainIndexer struct { knownSections uint64 // Number of sections known to be complete (block wise) cascadedHead uint64 // Block number of the last completed section cascaded to subindexers - throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources - + throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources + qiIndexerCh chan *types.WorkObject logger *log.Logger lock sync.Mutex pruneLock sync.Mutex @@ -136,6 +135,30 @@ func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend Cha return c } +func ReIndexChainIndexer(chainDb ethdb.Database) { + backend := &BloomIndexer{ + db: chainDb, + size: params.BloomBitsBlocks, + logger: log.Global, + } + table := rawdb.NewTable(chainDb, string(rawdb.BloomBitsIndexPrefix), chainDb.Location(), chainDb.Logger()) + c := &ChainIndexer{ + chainDb: chainDb, + indexDb: table, + backend: backend, + logger: log.Global, + sectionSize: params.BloomBitsBlocks, + confirmsReq: params.BloomConfirms, + throttling: bloomThrottling, + indexAddressUtxos: true, + qiIndexerCh: make(chan *types.WorkObject), + quit: make(chan chan error), + } + + c.ctx, c.ctxCancel = context.WithCancel(context.Background()) + c.ReIndexUTXOIndexer() +} + // Start creates a goroutine to feed chain head events into the indexer for 
// cascading background processing. Children do not need to be started, they // are notified about new events by their parents. @@ -147,6 +170,84 @@ func (c *ChainIndexer) Start(chain ChainIndexerChain, config params.ChainConfig) go c.eventLoop(chain.CurrentHeader(), events, sub, chain.NodeCtx(), config) } +// ReIndexUTXOIndexer reindexes the UTXO indexer from block 1 +func (c *ChainIndexer) ReIndexUTXOIndexer() { + start := time.Now() + height := uint64(1) + c.logger.Infof("Reindexing UTXO Indexer at block %d", height) + it := c.chainDb.NewIterator(rawdb.AddressUtxosPrefix, nil) + for it.Next() { + if len(it.Key()) == len(rawdb.AddressUtxosPrefix)+common.AddressLength { + c.chainDb.Delete(it.Key()) + } + } + it.Release() + it = c.chainDb.NewIterator(rawdb.AddressLockupsPrefix, nil) + for it.Next() { + if len(it.Key()) == len(rawdb.AddressLockupsPrefix)+common.AddressLength { + c.chainDb.Delete(it.Key()) + } + } + it.Release() + c.logger.Info("Deleted all utxos and lockups") + hash := rawdb.ReadCanonicalHash(c.chainDb, 0) + genesis := rawdb.ReadWorkObject(c.chainDb, 0, hash, types.BlockObject) + if genesis == nil { + c.logger.Errorf("Failed to reindex UTXO Indexer at block %d: failed to ReadWorkObject", 0) + return + } + c.logger.Info("Deleting all Bloom information") + countLen := len(rawdb.BloomBitsIndexPrefix) + len([]byte("count")) + sheadLen := len(rawdb.BloomBitsIndexPrefix) + len([]byte("shead")) + 8 + it = c.chainDb.NewIterator((rawdb.BloomBitsIndexPrefix), nil) + for it.Next() { + if len(it.Key()) == countLen && len(it.Value()) == 8 { + c.chainDb.Delete(it.Key()) + } else if len(it.Key()) == sheadLen && len(it.Value()) == 32 { + c.chainDb.Delete(it.Key()) + } + } + it.Release() + it = c.chainDb.NewIterator(rawdb.BloomBitsPrefix, nil) + for it.Next() { + if len(it.Key()) == rawdb.BloomBitsKeyLength { + c.chainDb.Delete(it.Key()) + } + } + it.Release() + c.logger.Info("Deleted all Bloom information") + go c.indexerLoop(genesis, c.qiIndexerCh,
common.ZONE_CTX, params.ChainConfig{Location: common.Location{0, 0}, IndexAddressUtxos: true}) + go c.updateLoop(common.ZONE_CTX) + + time.Sleep(100 * time.Millisecond) // Give indexer time to start + head := rawdb.ReadHeadBlockHash(c.chainDb) + for { + hash := rawdb.ReadCanonicalHash(c.chainDb, height) + block := rawdb.ReadWorkObject(c.chainDb, height, hash, types.BlockObject) + if block == nil { + c.logger.Errorf("Failed to reindex UTXO Indexer at block %d: failed to ReadWorkObject", height) + break + } + c.qiIndexerCh <- block + + if hash == head { + c.logger.Infof("Reindexed UTXO Indexer up to block %d hash %s took %s", height, hash.String(), common.PrettyDuration(time.Since(start))) + break + } else if hash == (common.Hash{}) { + c.logger.Errorf("Failed to reindex UTXO Indexer at block %d: failed to ReadCanonicalHash", height) + break + } + if height%1000 == 0 { + c.logger.Infof("Reindexing UTXO Indexer at block %d elapsed %s", height, common.PrettyDuration(time.Since(start))) + } + height++ + } + time.Sleep(100 * time.Millisecond) // Give indexer time to finish + if err := c.Close(); err != nil { + c.logger.WithField("err", err).Error("Failed to close chain indexer") + } +} + // Close tears down all goroutines belonging to the indexer and returns any error // that might have occurred internally. 
func (c *ChainIndexer) Close() error { @@ -206,6 +307,7 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.WorkObject, events chan Ch // Fire the initial new head event to start any outstanding processing c.newHead(currentHeader.NumberU64(nodeCtx), false) qiIndexerCh := make(chan *types.WorkObject, 10000) + c.qiIndexerCh = qiIndexerCh go c.indexerLoop(currentHeader, qiIndexerCh, nodeCtx, config) for { select { @@ -255,28 +357,26 @@ func (c *ChainIndexer) indexerLoop(currentHeader *types.WorkObject, qiIndexerCh // Ensure block is canonical before pruning if rawdb.ReadCanonicalHash(c.chainDb, block.NumberU64(nodeCtx)) != block.Hash() { if rawdb.ReadCanonicalHash(c.chainDb, block.NumberU64(nodeCtx)-1) != block.ParentHash(nodeCtx) { - c.logger.Errorf("Block %d sent to ChainIndexer is not canonical, skipping hash %s", block.NumberU64(nodeCtx), block.Hash()) + c.logger.Errorf("ChainIndexer: Block %d sent to ChainIndexer is not canonical, skipping hash %s", block.NumberU64(nodeCtx), block.Hash()) return } } c.PruneOldBlockData(block.NumberU64(nodeCtx) - PruneDepth) } - time1 := time.Since(start) - var validUtxoIndex bool - var addressOutpoints map[[20]byte][]*types.OutpointAndDenomination - if c.indexAddressUtxos { - validUtxoIndex = true - addressOutpoints = make(map[[20]byte][]*types.OutpointAndDenomination) + if block.Hash() == prevHash { + c.logger.WithField("block", block.NumberU64(nodeCtx)).Debug("ChainIndexer: Skipping already indexed block") + continue } + time1 := time.Since(start) time2 := time.Since(start) var time3, time4, time5 time.Duration - if block.ParentHash(nodeCtx) != prevHash && rawdb.ReadCanonicalHash(c.chainDb, prevHeader.NumberU64(nodeCtx)) != prevHash { + if block.ParentHash(nodeCtx) != prevHash { // Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then) // TODO: This seems a bit brittle, can we detect this case explicitly? 
commonHeader, err := rawdb.FindCommonAncestor(c.chainDb, prevHeader, block, nodeCtx) if commonHeader == nil || err != nil { - c.logger.WithField("err", err).Error("Failed to index: failed to find common ancestor") + c.logger.WithField("err", err).Error("ChainIndexer: Failed to index: failed to find common ancestor") continue } // If indexAddressUtxos flag is enabled, update the address utxo map @@ -293,7 +393,7 @@ func (c *ChainIndexer) indexerLoop(currentHeader *types.WorkObject, qiIndexerCh hashStack = append(hashStack, newHeader) newHeader = c.GetHeaderByHash(newHeader.ParentHash(nodeCtx)) if newHeader == nil { - c.logger.Error("Could not find new canonical header during reorg") + c.logger.Error("ChainIndexer: Could not find new canonical header during reorg") } // genesis check to not delete the genesis block if rawdb.IsGenesisHash(c.chainDb, newHeader.Hash()) { @@ -310,7 +410,7 @@ func (c *ChainIndexer) indexerLoop(currentHeader *types.WorkObject, qiIndexerCh prevHashStack = append(prevHashStack, prev) prev = c.GetHeaderByHash(prev.ParentHash(nodeCtx)) if prev == nil { - c.logger.Error("Could not find previously canonical header during reorg") + c.logger.Error("ChainIndexer: Could not find previously canonical header during reorg") break } // genesis check to not delete the genesis block @@ -323,11 +423,10 @@ func (c *ChainIndexer) indexerLoop(currentHeader *types.WorkObject, qiIndexerCh time3 = time.Since(start) - // Reorg out all outpoints of the reorg headers - err := c.reorgUtxoIndexer(prevHashStack, addressOutpoints, nodeCtx) + // Remove all outpoints of the reorg headers (old chain) + err := c.reorgUtxoIndexer(prevHashStack, nodeCtx) if err != nil { - c.logger.Error("Failed to reorg utxo indexer", "err", err) - validUtxoIndex = false + c.logger.Error("ChainIndexer: Failed to reorg utxo indexer", "err", err) } time4 = time.Since(start) @@ -336,10 +435,10 @@ func (c *ChainIndexer) indexerLoop(currentHeader *types.WorkObject, qiIndexerCh for i := 
len(hashStack) - 1; i >= 0; i-- { block := rawdb.ReadWorkObject(c.chainDb, hashStack[i].NumberU64(nodeCtx), hashStack[i].Hash(), types.BlockObject) if block == nil { - c.logger.Error("Failed to read block during reorg") + c.logger.Error("ChainIndexer: Failed to read block during reorg") continue } - c.addOutpointsToIndexer(addressOutpoints, nodeCtx, config, block) + c.addOutpointsToIndexer(nodeCtx, config, block) } } @@ -349,27 +448,15 @@ func (c *ChainIndexer) indexerLoop(currentHeader *types.WorkObject, qiIndexerCh } else { time3 = time.Since(start) if c.indexAddressUtxos { - c.addOutpointsToIndexer(addressOutpoints, nodeCtx, config, block) + c.addOutpointsToIndexer(nodeCtx, config, block) } time4 = time.Since(start) c.newHead(block.NumberU64(nodeCtx), false) time5 = time.Since(start) } - if c.indexAddressUtxos && validUtxoIndex { - err := rawdb.WriteAddressOutpoints(c.chainDb, addressOutpoints) - if err != nil { - panic(err) - } - } - time9 := time.Since(start) - for key, _ := range addressOutpoints { - delete(addressOutpoints, key) - } - addressOutpoints = nil - time10 := time.Since(start) prevHeader, prevHash = block, block.Hash() @@ -418,7 +505,7 @@ func (c *ChainIndexer) PruneOldBlockData(blockHeight uint64) { key = key[len(rawdb.UtxoPrefix) : rawdb.PrunedUtxoKeyWithDenominationLength+len(rawdb.UtxoPrefix)] createdUtxosToKeep = append(createdUtxosToKeep, key) } - c.logger.Infof("Removed %d utxo keys from block %d", len(createdUtxos)-len(createdUtxosToKeep), blockHeight) + c.logger.Infof("ChainIndexer: Removed %d utxo keys from block %d", len(createdUtxos)-len(createdUtxosToKeep), blockHeight) rawdb.WritePrunedUTXOKeys(c.chainDb, blockHeight, createdUtxosToKeep) } rawdb.DeleteCreatedUTXOKeys(c.chainDb, blockHash) @@ -709,9 +796,11 @@ func (c *ChainIndexer) removeSectionHead(section uint64) { } // addOutpointsToIndexer removes the spent outpoints and adds new utxos to the indexer. 
-func (c *ChainIndexer) addOutpointsToIndexer(addressOutpointsWithBlockHeight map[[20]byte][]*types.OutpointAndDenomination, nodeCtx int, config params.ChainConfig, block *types.WorkObject) { - utxos := block.QiTransactions() // TODO: Need to add the coinbase outputs into the Indexer +func (c *ChainIndexer) addOutpointsToIndexer(nodeCtx int, config params.ChainConfig, block *types.WorkObject) { + utxos := block.QiTransactions() + addressOutpointsWithBlockHeight := make(map[[20]byte][]*types.OutpointAndDenomination) + addressLockups := make(map[[20]byte][]*types.Lockup) for _, tx := range utxos { for _, in := range tx.TxIn() { @@ -721,15 +810,15 @@ func (c *ChainIndexer) addOutpointsToIndexer(addressOutpointsWithBlockHeight map height := rawdb.ReadUtxoToBlockHeight(c.chainDb, outpoint.TxHash, outpoint.Index) binary.BigEndian.PutUint32(address20[16:], height) if height > uint32(block.Number(nodeCtx).Uint64()) { - c.logger.Warn("Utxo is spent in a future block", "utxo", outpoint, "block", block.Number(nodeCtx)) + c.logger.Warn("ChainIndexer: Utxo is spent in a future block", "utxo", outpoint, "block", block.Number(nodeCtx)) continue } - outpointsForAddress, exists := addressOutpointsWithBlockHeight[address20] + _, exists := addressOutpointsWithBlockHeight[address20] if !exists { var err error - outpointsForAddress, err = rawdb.ReadOutpointsForAddressAtBlock(c.chainDb, address20) + outpointsForAddress, err := rawdb.ReadOutpointsForAddressAtBlock(c.chainDb, address20) if err != nil { - c.logger.Error("Failed to read outpoints for address", "address", address20, "err", err) + c.logger.Error("ChainIndexer: Failed to read outpoints for address", "address", address20, "err", err) continue } addressOutpointsWithBlockHeight[address20] = outpointsForAddress @@ -744,6 +833,10 @@ func (c *ChainIndexer) addOutpointsToIndexer(addressOutpointsWithBlockHeight map } for i, out := range tx.TxOut() { + if common.BytesToAddress(out.Address, common.Location{0, 0}).IsInQuaiLedgerScope() { 
+ // This is a conversion output + continue + } outpoint := types.OutPoint{ TxHash: tx.Hash(), @@ -756,21 +849,29 @@ func (c *ChainIndexer) addOutpointsToIndexer(addressOutpointsWithBlockHeight map Index: outpoint.Index, Denomination: out.Denomination, } - if _, exists := addressOutpointsWithBlockHeight[address20]; !exists { - var err error - addressOutpointsWithBlockHeight[address20], err = rawdb.ReadOutpointsForAddressAtBlock(c.chainDb, address20) - if err != nil { - c.logger.Error("Failed to read outpoints for address", "address", address20, "err", err) - continue - } - } + addressOutpointsWithBlockHeight[address20] = append(addressOutpointsWithBlockHeight[address20], outpointAndDenom) rawdb.WriteUtxoToBlockHeight(c.chainDb, outpointAndDenom.TxHash, outpointAndDenom.Index, uint32(block.NumberU64(nodeCtx))) } } for _, tx := range block.Body().ExternalTransactions() { - if tx.EtxType() == types.CoinbaseType && tx.To().IsInQiLedgerScope() { + if tx.EtxType() == types.CoinbaseType && tx.To().IsInQuaiLedgerScope() { + if len(tx.Data()) == 0 { + c.logger.Error("ChainIndexer: Coinbase transaction has no data", "tx", tx.Hash()) + continue + } + lockupByte := tx.Data()[0] + value := params.CalculateCoinbaseValueWithLockup(tx.Value(), lockupByte) + unlockHeight := block.NumberU64(nodeCtx) + LockupByteToBlockDepthFunc(lockupByte) + coinbaseAddr := tx.To().Bytes20() + binary.BigEndian.PutUint32(coinbaseAddr[16:], uint32(block.NumberU64(nodeCtx))) + addressLockups[coinbaseAddr] = append(addressLockups[coinbaseAddr], &types.Lockup{UnlockHeight: unlockHeight, Value: value}) + } else if tx.EtxType() == types.CoinbaseType && tx.To().IsInQiLedgerScope() { + if len(tx.Data()) == 0 { + c.logger.Error("ChainIndexer: Coinbase transaction has no data", "tx", tx.Hash()) + continue + } lockupByte := tx.Data()[0] // After the BigSporkFork the minimum conversion period changes to 7200 blocks var lockup *big.Int @@ -809,19 +910,15 @@ func (c *ChainIndexer) 
addOutpointsToIndexer(addressOutpointsWithBlockHeight map Lock: lockup, } - if _, exists := addressOutpointsWithBlockHeight[coinbaseAddr]; !exists { - var err error - addressOutpointsWithBlockHeight[coinbaseAddr], err = rawdb.ReadOutpointsForAddressAtBlock(c.chainDb, coinbaseAddr) - if err != nil { - c.logger.Error("Failed to read outpoints for address", "address", coinbaseAddr, "err", err) - continue - } - } addressOutpointsWithBlockHeight[coinbaseAddr] = append(addressOutpointsWithBlockHeight[coinbaseAddr], outpointAndDenom) rawdb.WriteUtxoToBlockHeight(c.chainDb, outpointAndDenom.TxHash, outpointAndDenom.Index, uint32(block.NumberU64(nodeCtx))) outputIndex++ } } + } else if tx.EtxType() == types.ConversionType && tx.To().IsInQuaiLedgerScope() { + coinbaseAddr := tx.To().Bytes20() + binary.BigEndian.PutUint32(coinbaseAddr[16:], uint32(block.NumberU64(nodeCtx))) + addressLockups[coinbaseAddr] = append(addressLockups[coinbaseAddr], &types.Lockup{UnlockHeight: block.NumberU64(nodeCtx) + params.NewConversionLockPeriod, Value: tx.Value()}) } else if tx.EtxType() == types.ConversionType && tx.To().IsInQiLedgerScope() { var lockup *big.Int if block.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV1 { @@ -860,14 +957,7 @@ func (c *ChainIndexer) addOutpointsToIndexer(addressOutpointsWithBlockHeight map Denomination: uint8(denomination), Lock: lock, } - if _, exists := addressOutpointsWithBlockHeight[addr20]; !exists { - var err error - addressOutpointsWithBlockHeight[addr20], err = rawdb.ReadOutpointsForAddressAtBlock(c.chainDb, addr20) - if err != nil { - c.logger.Error("Failed to read outpoints for address", "address", addr20, "err", err) - continue - } - } + addressOutpointsWithBlockHeight[addr20] = append(addressOutpointsWithBlockHeight[addr20], outpointAndDenom) rawdb.WriteUtxoToBlockHeight(c.chainDb, outpointAndDenom.TxHash, outpointAndDenom.Index, uint32(block.NumberU64(nodeCtx))) outputIndex++ @@ -875,18 +965,133 @@ func (c *ChainIndexer) 
addOutpointsToIndexer(addressOutpointsWithBlockHeight map } } } + + blockDepths := []uint64{ + params.NewConversionLockPeriod, + params.LockupByteToBlockDepth[1], + params.LockupByteToBlockDepth[2], + params.LockupByteToBlockDepth[3], + } + + for _, blockDepth := range blockDepths { + // Ensure we can look back far enough + if block.NumberU64(nodeCtx) <= blockDepth { + // Skip this depth if the current block height is less than or equal to the block depth + continue + } + // Calculate the target block height by subtracting the blockDepth from the current height + targetBlockHeight := block.NumberU64(nodeCtx) - blockDepth + + // Fetch the block at the calculated target height + targetBlock := c.GetBlockByNumber(targetBlockHeight) + if targetBlock == nil { + c.logger.Errorf("ChainIndexer: Unable to process block depth %d block at height %d not found", blockDepth, targetBlockHeight) + continue + } + for _, etx := range targetBlock.Body().ExternalTransactions() { + // Check if the transaction is a coinbase or conversion transaction + if (types.IsCoinBaseTx(etx) || types.IsConversionTx(etx)) && etx.To().IsInQuaiLedgerScope() { + coinbase := etx.To().Bytes20() + binary.BigEndian.PutUint32(coinbase[16:], uint32(targetBlockHeight)) + if _, exists := addressLockups[coinbase]; !exists { + lockups, err := rawdb.ReadLockupsForAddressAtBlock(c.chainDb, coinbase) + if err != nil { + c.logger.Errorf("ChainIndexer: Error reading lockups for address: %v", err) + continue + } + addressLockups[coinbase] = lockups + } + // Create a new slice to hold lockups that are not yet eligible for unlocking + var remainingLockups []*types.Lockup // Replace `LockupType` with the actual type of lockup + + for _, lockup := range addressLockups[coinbase] { + if lockup.UnlockHeight == block.NumberU64(nodeCtx) { + // Do nothing to remove the lockup by skipping it + } else if lockup.UnlockHeight < block.NumberU64(nodeCtx) { + c.logger.Errorf("Lockup unlock height is less than current block height: %d < 
%d", lockup.UnlockHeight, block.NumberU64(nodeCtx)) + remainingLockups = append(remainingLockups, lockup) + } else { + remainingLockups = append(remainingLockups, lockup) + } + } + + // Update the original slice with the remaining lockups + addressLockups[coinbase] = remainingLockups + } + } + } + + err := rawdb.WriteAddressOutpoints(c.chainDb, addressOutpointsWithBlockHeight) + if err != nil { + panic(err) + } + err = rawdb.WriteAddressLockups(c.chainDb, addressLockups) + if err != nil { + panic(err) + } } // reorgUtxoIndexer adds back previously removed outpoints and removes newly added outpoints. // This is done in reverse order from the old header to the common ancestor. -func (c *ChainIndexer) reorgUtxoIndexer(headers []*types.WorkObject, addressOutpoints map[[20]byte][]*types.OutpointAndDenomination, nodeCtx int) error { - for _, header := range headers { +func (c *ChainIndexer) reorgUtxoIndexer(headers []*types.WorkObject, nodeCtx int) error { - sutxos, err := rawdb.ReadSpentUTXOs(c.chainDb, header.Hash()) + for _, header := range headers { + addressOutpoints := make(map[[20]byte][]*types.OutpointAndDenomination) + addressLockups := make(map[[20]byte][]*types.Lockup) + block := rawdb.ReadWorkObject(c.chainDb, header.NumberU64(nodeCtx), header.Hash(), types.BlockObject) + if block == nil { + c.logger.Errorf("ChainIndexer: Error reading block during reorg hash: %s", block.Hash().String()) + continue + } + for _, tx := range block.QiTransactions() { + for _, out := range tx.TxOut() { + if common.BytesToAddress(out.Address, common.Location{0, 0}).IsInQuaiLedgerScope() { + // This is a conversion output + continue + } + address20 := [20]byte(out.Address) + binary.BigEndian.PutUint32(address20[16:], uint32(block.NumberU64(nodeCtx))) + // Delete all outpoints for this address and block combination + addressOutpoints[address20] = make([]*types.OutpointAndDenomination, 0) + } + } + for _, etx := range block.Body().ExternalTransactions() { + if etx.EtxType() == 
types.CoinbaseType && etx.To().IsInQuaiLedgerScope() { + if len(etx.Data()) == 0 { + c.logger.Error("ChainIndexer: Coinbase transaction has no data", "tx", etx.Hash()) + continue + } + coinbaseAddr := etx.To().Bytes20() + binary.BigEndian.PutUint32(coinbaseAddr[16:], uint32(block.NumberU64(nodeCtx))) + // Remove all the lockups created by this address and block + addressLockups[coinbaseAddr] = make([]*types.Lockup, 0) + } else if etx.EtxType() == types.ConversionType && etx.To().IsInQuaiLedgerScope() { + coinbaseAddr := etx.To().Bytes20() + binary.BigEndian.PutUint32(coinbaseAddr[16:], uint32(block.NumberU64(nodeCtx))) + // Remove all the lockups created by this address and block + addressLockups[coinbaseAddr] = make([]*types.Lockup, 0) + } else if etx.EtxType() == types.CoinbaseType && etx.To().IsInQiLedgerScope() { + if len(etx.Data()) == 0 { + c.logger.Error("ChainIndexer: Coinbase transaction has no data", "tx", etx.Hash()) + continue + } + coinbaseAddr := etx.To().Bytes20() + binary.BigEndian.PutUint32(coinbaseAddr[16:], uint32(block.NumberU64(nodeCtx))) + // Remove all the UTXOs created by this address and block + addressOutpoints[coinbaseAddr] = make([]*types.OutpointAndDenomination, 0) + } else if etx.EtxType() == types.ConversionType && etx.To().IsInQiLedgerScope() { + addr20 := etx.To().Bytes20() + binary.BigEndian.PutUint32(addr20[16:], uint32(block.NumberU64(nodeCtx))) + // Remove all the UTXOs created by this address and block + addressOutpoints[addr20] = make([]*types.OutpointAndDenomination, 0) + } + } + // Re-create spent UTXOs (inputs) + sutxos, err := rawdb.ReadSpentUTXOs(c.chainDb, block.Hash()) if err != nil { return err } - trimmedUtxos, err := rawdb.ReadTrimmedUTXOs(c.chainDb, header.Hash()) + trimmedUtxos, err := rawdb.ReadTrimmedUTXOs(c.chainDb, block.Hash()) if err != nil { return err } @@ -902,58 +1107,74 @@ func (c *ChainIndexer) reorgUtxoIndexer(headers []*types.WorkObject, addressOutp height := rawdb.ReadUtxoToBlockHeight(c.chainDb, 
sutxo.TxHash, sutxo.Index) addr20 := [20]byte(sutxo.Address) binary.BigEndian.PutUint32(addr20[16:], height) - if _, exists := addressOutpoints[addr20]; !exists { - var err error - addressOutpoints[addr20], err = rawdb.ReadOutpointsForAddressAtBlock(c.chainDb, addr20) - if err != nil { - return err - } - } addressOutpoints[addr20] = append(addressOutpoints[addr20], outpointAndDenom) - rawdb.WriteUtxoToBlockHeight(c.chainDb, sutxo.TxHash, sutxo.Index, height) - } - utxoKeys, err := rawdb.ReadCreatedUTXOKeys(c.chainDb, header.Hash()) - if err != nil { - return err + + blockDepths := []uint64{ + params.LockupByteToBlockDepth[0], + params.LockupByteToBlockDepth[1], + params.LockupByteToBlockDepth[2], + params.LockupByteToBlockDepth[3], } - for _, key := range utxoKeys { - if len(key) == rawdb.UtxoKeyWithDenominationLength { - key = key[:rawdb.UtxoKeyLength] // The last byte of the key is the denomination (but only in CreatedUTXOKeys) - } - data, _ := c.chainDb.Get(key) - utxoProto := new(types.ProtoTxOut) - if err := proto.Unmarshal(data, utxoProto); err != nil { - continue - } - utxo := new(types.TxOut) - if err := utxo.ProtoDecode(utxoProto); err != nil { + + for _, blockDepth := range blockDepths { + // Ensure we can look back far enough + if block.NumberU64(nodeCtx) <= blockDepth { + // Skip this depth if the current block height is less than or equal to the block depth continue } - addr20 := [20]byte(utxo.Address) - binary.BigEndian.PutUint32(addr20[16:], uint32(header.NumberU64(nodeCtx))) + // Calculate the target block height by subtracting the blockDepth from the current height + targetBlockHeight := block.NumberU64(nodeCtx) - blockDepth - outpointsForAddress, exists := addressOutpoints[addr20] - if !exists { - var err error - outpointsForAddress, err = rawdb.ReadOutpointsForAddressAtBlock(c.chainDb, addr20) - if err != nil { - return err - } - addressOutpoints[addr20] = outpointsForAddress - } - txHash, index, err := rawdb.ReverseUtxoKey(key) - if err != nil { 
- return err + // Fetch the block at the calculated target height + targetBlock := c.GetBlockByNumber(targetBlockHeight) + if targetBlock == nil { + c.logger.Errorf("ChainIndexer: Unable to process block depth %d block at height %d not found", blockDepth, targetBlockHeight) + continue } - for i, outpointAndDenom := range addressOutpoints[addr20] { - if outpointAndDenom.TxHash == txHash && outpointAndDenom.Index == index { - addressOutpoints[addr20] = append(addressOutpoints[addr20][:i], addressOutpoints[addr20][i+1:]...) - break + for _, etx := range targetBlock.Body().ExternalTransactions() { + if etx.EtxType() == types.CoinbaseType && etx.To().IsInQuaiLedgerScope() { + if len(etx.Data()) == 0 { + c.logger.Error("ChainIndexer: Coinbase transaction has no data", "tx", etx.Hash()) + continue + } + lockupByte := etx.Data()[0] + value := params.CalculateCoinbaseValueWithLockup(etx.Value(), lockupByte) + unlockHeight := targetBlockHeight + LockupByteToBlockDepthFunc(lockupByte) + coinbaseAddr := etx.To().Bytes20() + binary.BigEndian.PutUint32(coinbaseAddr[16:], uint32(targetBlockHeight)) + if _, exists := addressLockups[coinbaseAddr]; !exists { + lockups, err := rawdb.ReadLockupsForAddressAtBlock(c.chainDb, coinbaseAddr) + if err != nil { + c.logger.Errorf("ChainIndexer: Error reading lockups for address: %v", err) + continue + } + addressLockups[coinbaseAddr] = lockups + } + addressLockups[coinbaseAddr] = append(addressLockups[coinbaseAddr], &types.Lockup{UnlockHeight: unlockHeight, Value: value}) + } else if etx.EtxType() == types.ConversionType && etx.To().IsInQuaiLedgerScope() { + addr20 := etx.To().Bytes20() + binary.BigEndian.PutUint32(addr20[16:], uint32(targetBlockHeight)) + if _, exists := addressLockups[addr20]; !exists { + lockups, err := rawdb.ReadLockupsForAddressAtBlock(c.chainDb, addr20) + if err != nil { + c.logger.Errorf("ChainIndexer: Error reading lockups for address: %v", err) + continue + } + addressLockups[addr20] = lockups + } + 
addressLockups[addr20] = append(addressLockups[addr20], &types.Lockup{UnlockHeight: targetBlockHeight + params.NewConversionLockPeriod, Value: etx.Value()}) } } } - + err = rawdb.WriteAddressOutpoints(c.chainDb, addressOutpoints) + if err != nil { + panic(err) + } + err = rawdb.WriteAddressLockups(c.chainDb, addressLockups) + if err != nil { + panic(err) + } } return nil } @@ -975,3 +1196,22 @@ func (c *ChainIndexer) GetHeaderByHash(hash common.Hash) *types.WorkObject { } return header } + +func (c *ChainIndexer) GetBlockByNumber(number uint64) *types.WorkObject { + hash := rawdb.ReadCanonicalHash(c.chainDb, number) + if hash == (common.Hash{}) { + return nil + } + block := rawdb.ReadWorkObject(c.chainDb, number, hash, types.BlockObject) + if block == nil { + return nil + } + return block +} + +func LockupByteToBlockDepthFunc(lockupByte uint8) uint64 { + if lockupByte == 0 { + return params.NewConversionLockPeriod + } + return params.LockupByteToBlockDepth[lockupByte] +} diff --git a/core/core.go b/core/core.go index fe2a1d5a8a..6e37cfc5a9 100644 --- a/core/core.go +++ b/core/core.go @@ -1278,6 +1278,24 @@ func (c *Core) GetOutpointsByAddress(address common.Address) ([]*types.OutpointA return rawdb.ReadOutpointsForAddress(c.sl.sliceDb, address) } +func (c *Core) GetLockupsByAddressAndRange(address common.Address, start, end uint32) ([]*types.Lockup, error) { + lockups := make([]*types.Lockup, 0) + for i := start; i <= end; i++ { + addr20 := address.Bytes20() + binary.BigEndian.PutUint32(addr20[16:], i) + lockupsAtBlock, err := rawdb.ReadLockupsForAddressAtBlock(c.sl.sliceDb, addr20) + if err != nil { + return nil, err + } + lockups = append(lockups, lockupsAtBlock...) 
+ } + return lockups, nil +} + +func (c *Core) GetLockupsByAddress(address common.Address) ([]*types.Lockup, error) { + return rawdb.ReadLockupsForAddress(c.sl.sliceDb, address) +} + func (c *Core) GetUTXOsByAddress(address common.Address) ([]*types.UtxoEntry, error) { outpointsForAddress, err := c.GetOutpointsByAddress(address) if err != nil { @@ -1288,7 +1306,7 @@ func (c *Core) GetUTXOsByAddress(address common.Address) ([]*types.UtxoEntry, er for _, outpoint := range outpointsForAddress { entry := rawdb.GetUTXO(c.sl.sliceDb, outpoint.TxHash, outpoint.Index) if entry == nil { - return nil, errors.New("failed to get UTXO for address") + continue } utxos = append(utxos, entry) } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 9051dabe16..156d814c07 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -1215,6 +1215,92 @@ func DeleteOutpointsForAddress(db ethdb.KeyValueWriter, address [20]byte) { } } +func WriteAddressLockups(db ethdb.KeyValueWriter, lockupMap map[[20]byte][]*types.Lockup) error { + for addressWithBlockHeight, lockups := range lockupMap { + if err := WriteLockupsForAddressAtBlock(db, addressWithBlockHeight, lockups); err != nil { + return err + } + } + return nil +} + +func WriteLockupsForAddressAtBlock(db ethdb.KeyValueWriter, address [20]byte, lockups []*types.Lockup) error { + addressLockupsProto := &types.ProtoLockups{ + Lockups: make([]*types.ProtoLockup, 0, len(lockups)), + } + + for _, lockup := range lockups { + lockupProto := &types.ProtoLockup{ + Value: lockup.Value.Bytes(), + UnlockHeight: lockup.UnlockHeight, + } + + addressLockupsProto.Lockups = append(addressLockupsProto.Lockups, lockupProto) + } + + // Now, marshal utxosProto to protobuf bytes + data, err := proto.Marshal(addressLockupsProto) + if err != nil { + db.Logger().WithField("err", err).Fatal("Failed to rlp encode utxos") + } + if err := db.Put(addressLockupsKey(address), data); err != nil { + 
db.Logger().WithField("err", err).Fatal("Failed to store lockups")
+ it := db.NewIterator(prefix, nil) + defer it.Release() + lockups := make([]*types.Lockup, 0) + for it.Next() { + if len(it.Key()) != len(AddressUtxosPrefix)+common.AddressLength { + continue + } + addressLockupsProto := &types.ProtoLockups{ + Lockups: make([]*types.ProtoLockup, 0), + } + if err := proto.Unmarshal(it.Value(), addressLockupsProto); err != nil { + db.Logger().WithField("err", err).Fatal("Failed to proto Unmarshal addressOutpointsProto") + return nil, err + } + for _, lockupProto := range addressLockupsProto.Lockups { + lockup := &types.Lockup{ + Value: new(big.Int).SetBytes(lockupProto.Value), + UnlockHeight: lockupProto.UnlockHeight, + } + lockups = append(lockups, lockup) + } + } + return lockups, nil +} + func WriteGenesisHashes(db ethdb.KeyValueWriter, hashes common.Hashes) { protoHashes := hashes.ProtoEncode() data, err := proto.Marshal(protoHashes) diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go index c46e849f28..1eb319eaa5 100644 --- a/core/rawdb/accessors_indexes.go +++ b/core/rawdb/accessors_indexes.go @@ -156,7 +156,7 @@ func DeleteBloombits(db ethdb.Database, bit uint, from uint64, to uint64) { if bytes.Compare(it.Key(), end) >= 0 { break } - if len(it.Key()) != len(bloomBitsPrefix)+2+8+32 { + if len(it.Key()) != BloomBitsKeyLength { continue } db.Delete(it.Key()) diff --git a/core/rawdb/database.go b/core/rawdb/database.go index f0e9afdf52..f21158769b 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -415,7 +415,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte, logger *log. 
preimages.Add(size) case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength): metadata.Add(size) - case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength): + case bytes.HasPrefix(key, BloomBitsPrefix) && len(key) == (len(BloomBitsPrefix)+10+common.HashLength): bloomBits.Add(size) case bytes.HasPrefix(key, BloomBitsIndexPrefix): bloomBits.Add(size) diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 44038b653d..8a89753ebc 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -81,7 +81,8 @@ var ( workObjectBodyPrefix = []byte("wb") //workObjectBodyPrefix + hash -> []common.Hash badHashesListPrefix = []byte("bh") inboundEtxsPrefix = []byte("ie") // inboundEtxsPrefix + hash -> types.Transactions - AddressUtxosPrefix = []byte("au") // addressUtxosPrefix + hash -> []types.UtxoEntry + AddressUtxosPrefix = []byte("au") // addressUtxosPrefix + address -> []types.UtxoEntry + AddressLockupsPrefix = []byte("al") // addressLockupsPrefix + address -> []types.Lockup utxoToBlockHeightPrefix = []byte("ub") // utxoToBlockHeightPrefix + hash -> uint64 processedStatePrefix = []byte("ps") // processedStatePrefix + hash -> boolean multiSetPrefix = []byte("ms") // multiSetPrefix + hash -> multiset @@ -103,7 +104,7 @@ var ( bloomPrefix = []byte("bl") // bloomPrefix + hash -> bloom at block txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata - bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits + BloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value CodePrefix = []byte("c") // CodePrefix + code 
hash -> account code @@ -239,9 +240,11 @@ func storageSnapshotsKey(accountHash common.Hash) []byte { return append(SnapshotStoragePrefix, accountHash.Bytes()...) } +var BloomBitsKeyLength = len(BloomBitsPrefix) + 2 + 8 + common.HashLength + // bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte { - key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...) + key := append(append(BloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...) binary.BigEndian.PutUint16(key[1:], uint16(bit)) binary.BigEndian.PutUint64(key[3:], section) @@ -305,6 +308,10 @@ func addressUtxosKey(address [20]byte) []byte { return append(AddressUtxosPrefix, address[:]...) } +func addressLockupsKey(address [20]byte) []byte { + return append(AddressLockupsPrefix, address[:]...) +} + var UtxoKeyLength = len(UtxoPrefix) + common.HashLength + 2 // This can be optimized via VLQ encoding as btcd has done diff --git a/core/state_processor.go b/core/state_processor.go index 9c42f219f3..c6c2fc096c 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -1037,6 +1037,9 @@ func ValidateQiTxInputs(tx *types.Transaction, chain ChainContext, db ethdb.Read if tx.Type() != types.QiTxType { return nil, fmt.Errorf("tx %032x is not a QiTx", tx.Hash()) } + if tx.ChainId().Cmp(signer.ChainID()) != 0 { + return nil, fmt.Errorf("tx %032x has wrong chain ID", tx.Hash()) + } totalQitIn := big.NewInt(0) addresses := make(map[common.AddressBytes]struct{}) inputs := make(map[uint]uint64) diff --git a/core/state_transition.go b/core/state_transition.go index cb104e3b3d..b817e28d24 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -261,7 +261,7 @@ func (st *StateTransition) preCheck() error { } // Make sure that transaction gasPrice is greater than the baseFee // Skip the checks if gas fields are zero and baseFee was explicitly disabled (eth_call) - if 
!st.evm.Config.NoBaseFee || st.gasPrice.BitLen() > 0 || st.gasPrice.BitLen() > 0 { + if !st.evm.Config.NoBaseFee || st.gasPrice.BitLen() > 0 || st.minerTip.BitLen() > 0 { if l := st.gasPrice.BitLen(); l > 256 { return fmt.Errorf("%w: address %v, gasPrice bit length: %d", ErrFeeCapVeryHigh, st.msg.From().Hex(), l) diff --git a/core/tx_pool.go b/core/tx_pool.go index d2db17c5cc..13ed5a67c6 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -1479,7 +1479,13 @@ func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { // Get returns a transaction if it is contained in the pool and nil otherwise. func (pool *TxPool) Get(hash common.Hash) *types.Transaction { - return pool.all.Get(hash) + tx := pool.all.Get(hash) + if tx == nil { + if qiTx, ok := pool.qiPool.Get(hash); ok { + return qiTx.Tx() + } + } + return tx } // Has returns an indicator whether txpool has a transaction cached with the diff --git a/core/types/bloom9.go b/core/types/bloom9.go index 9b2965b251..d1908a821e 100644 --- a/core/types/bloom9.go +++ b/core/types/bloom9.go @@ -40,6 +40,19 @@ const ( // Bloom represents a 2048 bit bloom filter. type Bloom [BloomByteLength]byte +type LegacyBloom [256]byte + +// MarshalText encodes b as a hex string with 0x prefix. +func (b LegacyBloom) MarshalText() ([]byte, error) { + return hexutil.Bytes(b[:]).MarshalText() +} + +func (b Bloom) ToLegacyBloom() LegacyBloom { + var legacy LegacyBloom + copy(legacy[:], b[:]) + return legacy +} + // BytesToBloom converts a byte slice to a bloom filter. // It panics if b is not of suitable size. 
func BytesToBloom(b []byte) Bloom { diff --git a/core/types/proto_block.pb.go b/core/types/proto_block.pb.go index 646cbb806c..f1e6fcc2c9 100644 --- a/core/types/proto_block.pb.go +++ b/core/types/proto_block.pb.go @@ -2613,6 +2613,104 @@ func (x *ProtoBetas) GetBeta1() []byte { return nil } +type ProtoLockup struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + UnlockHeight uint64 `protobuf:"varint,2,opt,name=unlock_height,json=unlockHeight,proto3" json:"unlock_height,omitempty"` +} + +func (x *ProtoLockup) Reset() { + *x = ProtoLockup{} + mi := &file_core_types_proto_block_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ProtoLockup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtoLockup) ProtoMessage() {} + +func (x *ProtoLockup) ProtoReflect() protoreflect.Message { + mi := &file_core_types_proto_block_proto_msgTypes[40] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtoLockup.ProtoReflect.Descriptor instead. 
+func (*ProtoLockup) Descriptor() ([]byte, []int) { + return file_core_types_proto_block_proto_rawDescGZIP(), []int{40} +} + +func (x *ProtoLockup) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *ProtoLockup) GetUnlockHeight() uint64 { + if x != nil { + return x.UnlockHeight + } + return 0 +} + +type ProtoLockups struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Lockups []*ProtoLockup `protobuf:"bytes,1,rep,name=lockups,proto3" json:"lockups,omitempty"` +} + +func (x *ProtoLockups) Reset() { + *x = ProtoLockups{} + mi := &file_core_types_proto_block_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ProtoLockups) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtoLockups) ProtoMessage() {} + +func (x *ProtoLockups) ProtoReflect() protoreflect.Message { + mi := &file_core_types_proto_block_proto_msgTypes[41] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtoLockups.ProtoReflect.Descriptor instead. 
+func (*ProtoLockups) Descriptor() ([]byte, []int) { + return file_core_types_proto_block_proto_rawDescGZIP(), []int{41} +} + +func (x *ProtoLockups) GetLockups() []*ProtoLockup { + if x != nil { + return x.Lockups + } + return nil +} + var File_core_types_proto_block_proto protoreflect.FileDescriptor var file_core_types_proto_block_proto_rawDesc = []byte{ @@ -3161,11 +3259,19 @@ var file_core_types_proto_block_proto_rawDesc = []byte{ 0x66, 0x22, 0x38, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x65, 0x74, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x74, 0x61, 0x30, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x65, 0x74, 0x61, 0x30, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x74, 0x61, 0x31, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x33, 0x5a, 0x31, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x6d, 0x69, 0x6e, 0x61, - 0x6e, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x2f, 0x67, 0x6f, - 0x2d, 0x71, 0x75, 0x61, 0x69, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x65, 0x74, 0x61, 0x31, 0x22, 0x48, 0x0a, 0x0b, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x75, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x48, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x3c, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x6b, 0x75, 0x70, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x63, 
0x6b, 0x75, 0x70, 0x52, 0x07, 0x6c, 0x6f, 0x63, 0x6b, + 0x75, 0x70, 0x73, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x64, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x6e, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x61, 0x74, + 0x65, 0x67, 0x69, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x71, 0x75, 0x61, 0x69, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3180,7 +3286,7 @@ func file_core_types_proto_block_proto_rawDescGZIP() []byte { return file_core_types_proto_block_proto_rawDescData } -var file_core_types_proto_block_proto_msgTypes = make([]protoimpl.MessageInfo, 41) +var file_core_types_proto_block_proto_msgTypes = make([]protoimpl.MessageInfo, 43) var file_core_types_proto_block_proto_goTypes = []any{ (*ProtoHeader)(nil), // 0: block.ProtoHeader (*ProtoTransaction)(nil), // 1: block.ProtoTransaction @@ -3222,51 +3328,53 @@ var file_core_types_proto_block_proto_goTypes = []any{ (*ProtoTokenChoiceArray)(nil), // 37: block.ProtoTokenChoiceArray (*ProtoTokenChoice)(nil), // 38: block.ProtoTokenChoice (*ProtoBetas)(nil), // 39: block.ProtoBetas - nil, // 40: block.ProtoTrimDepths.TrimDepthsEntry - (*common.ProtoHash)(nil), // 41: common.ProtoHash - (*common.ProtoLocation)(nil), // 42: common.ProtoLocation - (*common.ProtoAddress)(nil), // 43: common.ProtoAddress - (*common.ProtoHashes)(nil), // 44: common.ProtoHashes + (*ProtoLockup)(nil), // 40: block.ProtoLockup + (*ProtoLockups)(nil), // 41: block.ProtoLockups + nil, // 42: block.ProtoTrimDepths.TrimDepthsEntry + (*common.ProtoHash)(nil), // 43: common.ProtoHash + (*common.ProtoLocation)(nil), // 44: common.ProtoLocation + (*common.ProtoAddress)(nil), // 45: common.ProtoAddress + (*common.ProtoHashes)(nil), // 46: common.ProtoHashes } var file_core_types_proto_block_proto_depIdxs = []int32{ - 41, // 0: block.ProtoHeader.parent_hash:type_name -> common.ProtoHash - 41, // 1: block.ProtoHeader.uncle_hash:type_name -> 
common.ProtoHash - 41, // 2: block.ProtoHeader.evm_root:type_name -> common.ProtoHash - 41, // 3: block.ProtoHeader.tx_hash:type_name -> common.ProtoHash - 41, // 4: block.ProtoHeader.outbound_etx_hash:type_name -> common.ProtoHash - 41, // 5: block.ProtoHeader.etx_rollup_hash:type_name -> common.ProtoHash - 41, // 6: block.ProtoHeader.manifest_hash:type_name -> common.ProtoHash - 41, // 7: block.ProtoHeader.receipt_hash:type_name -> common.ProtoHash - 42, // 8: block.ProtoHeader.location:type_name -> common.ProtoLocation - 41, // 9: block.ProtoHeader.mix_hash:type_name -> common.ProtoHash - 41, // 10: block.ProtoHeader.utxo_root:type_name -> common.ProtoHash - 41, // 11: block.ProtoHeader.etx_set_root:type_name -> common.ProtoHash - 41, // 12: block.ProtoHeader.etx_eligible_slices:type_name -> common.ProtoHash - 41, // 13: block.ProtoHeader.prime_terminus_hash:type_name -> common.ProtoHash - 41, // 14: block.ProtoHeader.interlink_root_hash:type_name -> common.ProtoHash + 43, // 0: block.ProtoHeader.parent_hash:type_name -> common.ProtoHash + 43, // 1: block.ProtoHeader.uncle_hash:type_name -> common.ProtoHash + 43, // 2: block.ProtoHeader.evm_root:type_name -> common.ProtoHash + 43, // 3: block.ProtoHeader.tx_hash:type_name -> common.ProtoHash + 43, // 4: block.ProtoHeader.outbound_etx_hash:type_name -> common.ProtoHash + 43, // 5: block.ProtoHeader.etx_rollup_hash:type_name -> common.ProtoHash + 43, // 6: block.ProtoHeader.manifest_hash:type_name -> common.ProtoHash + 43, // 7: block.ProtoHeader.receipt_hash:type_name -> common.ProtoHash + 44, // 8: block.ProtoHeader.location:type_name -> common.ProtoLocation + 43, // 9: block.ProtoHeader.mix_hash:type_name -> common.ProtoHash + 43, // 10: block.ProtoHeader.utxo_root:type_name -> common.ProtoHash + 43, // 11: block.ProtoHeader.etx_set_root:type_name -> common.ProtoHash + 43, // 12: block.ProtoHeader.etx_eligible_slices:type_name -> common.ProtoHash + 43, // 13: block.ProtoHeader.prime_terminus_hash:type_name -> 
common.ProtoHash + 43, // 14: block.ProtoHeader.interlink_root_hash:type_name -> common.ProtoHash 5, // 15: block.ProtoTransaction.access_list:type_name -> block.ProtoAccessList - 41, // 16: block.ProtoTransaction.originating_tx_hash:type_name -> common.ProtoHash + 43, // 16: block.ProtoTransaction.originating_tx_hash:type_name -> common.ProtoHash 25, // 17: block.ProtoTransaction.tx_ins:type_name -> block.ProtoTxIns 26, // 18: block.ProtoTransaction.tx_outs:type_name -> block.ProtoTxOuts - 41, // 19: block.ProtoTransaction.parent_hash:type_name -> common.ProtoHash - 41, // 20: block.ProtoTransaction.mix_hash:type_name -> common.ProtoHash + 43, // 19: block.ProtoTransaction.parent_hash:type_name -> common.ProtoHash + 43, // 20: block.ProtoTransaction.mix_hash:type_name -> common.ProtoHash 1, // 21: block.ProtoTransactions.transactions:type_name -> block.ProtoTransaction 0, // 22: block.ProtoHeaders.headers:type_name -> block.ProtoHeader - 41, // 23: block.ProtoManifest.manifest:type_name -> common.ProtoHash + 43, // 23: block.ProtoManifest.manifest:type_name -> common.ProtoHash 15, // 24: block.ProtoAccessList.access_tuples:type_name -> block.ProtoAccessTuple - 41, // 25: block.ProtoWorkObjectHeader.header_hash:type_name -> common.ProtoHash - 41, // 26: block.ProtoWorkObjectHeader.parent_hash:type_name -> common.ProtoHash - 41, // 27: block.ProtoWorkObjectHeader.tx_hash:type_name -> common.ProtoHash - 42, // 28: block.ProtoWorkObjectHeader.location:type_name -> common.ProtoLocation - 41, // 29: block.ProtoWorkObjectHeader.mix_hash:type_name -> common.ProtoHash - 43, // 30: block.ProtoWorkObjectHeader.primary_coinbase:type_name -> common.ProtoAddress + 43, // 25: block.ProtoWorkObjectHeader.header_hash:type_name -> common.ProtoHash + 43, // 26: block.ProtoWorkObjectHeader.parent_hash:type_name -> common.ProtoHash + 43, // 27: block.ProtoWorkObjectHeader.tx_hash:type_name -> common.ProtoHash + 44, // 28: block.ProtoWorkObjectHeader.location:type_name -> 
common.ProtoLocation + 43, // 29: block.ProtoWorkObjectHeader.mix_hash:type_name -> common.ProtoHash + 45, // 30: block.ProtoWorkObjectHeader.primary_coinbase:type_name -> common.ProtoAddress 6, // 31: block.ProtoWorkObjectHeaders.wo_headers:type_name -> block.ProtoWorkObjectHeader 0, // 32: block.ProtoWorkObjectBody.header:type_name -> block.ProtoHeader 2, // 33: block.ProtoWorkObjectBody.transactions:type_name -> block.ProtoTransactions 7, // 34: block.ProtoWorkObjectBody.uncles:type_name -> block.ProtoWorkObjectHeaders 2, // 35: block.ProtoWorkObjectBody.outbound_etxs:type_name -> block.ProtoTransactions 4, // 36: block.ProtoWorkObjectBody.manifest:type_name -> block.ProtoManifest - 44, // 37: block.ProtoWorkObjectBody.interlink_hashes:type_name -> common.ProtoHashes + 46, // 37: block.ProtoWorkObjectBody.interlink_hashes:type_name -> common.ProtoHashes 6, // 38: block.ProtoWorkObject.wo_header:type_name -> block.ProtoWorkObjectHeader 8, // 39: block.ProtoWorkObject.wo_body:type_name -> block.ProtoWorkObjectBody 1, // 40: block.ProtoWorkObject.tx:type_name -> block.ProtoTransaction @@ -3275,19 +3383,19 @@ var file_core_types_proto_block_proto_depIdxs = []int32{ 11, // 43: block.ProtoWorkObjectBlocksView.work_objects:type_name -> block.ProtoWorkObjectBlockView 9, // 44: block.ProtoWorkObjectHeaderView.work_object:type_name -> block.ProtoWorkObject 9, // 45: block.ProtoWorkObjectShareView.work_object:type_name -> block.ProtoWorkObject - 41, // 46: block.ProtoAccessTuple.storage_key:type_name -> common.ProtoHash + 43, // 46: block.ProtoAccessTuple.storage_key:type_name -> common.ProtoHash 19, // 47: block.ProtoReceiptForStorage.logs:type_name -> block.ProtoLogsForStorage - 41, // 48: block.ProtoReceiptForStorage.tx_hash:type_name -> common.ProtoHash - 43, // 49: block.ProtoReceiptForStorage.contract_address:type_name -> common.ProtoAddress + 43, // 48: block.ProtoReceiptForStorage.tx_hash:type_name -> common.ProtoHash + 45, // 49: 
block.ProtoReceiptForStorage.contract_address:type_name -> common.ProtoAddress 2, // 50: block.ProtoReceiptForStorage.outbound_etxs:type_name -> block.ProtoTransactions 16, // 51: block.ProtoReceiptsForStorage.receipts:type_name -> block.ProtoReceiptForStorage - 43, // 52: block.ProtoLogForStorage.address:type_name -> common.ProtoAddress - 41, // 53: block.ProtoLogForStorage.topics:type_name -> common.ProtoHash + 45, // 52: block.ProtoLogForStorage.address:type_name -> common.ProtoAddress + 43, // 53: block.ProtoLogForStorage.topics:type_name -> common.ProtoHash 18, // 54: block.ProtoLogsForStorage.logs:type_name -> block.ProtoLogForStorage 9, // 55: block.ProtoPendingHeader.wo:type_name -> block.ProtoWorkObject 21, // 56: block.ProtoPendingHeader.termini:type_name -> block.ProtoTermini - 41, // 57: block.ProtoTermini.dom_termini:type_name -> common.ProtoHash - 41, // 58: block.ProtoTermini.sub_termini:type_name -> common.ProtoHash + 43, // 57: block.ProtoTermini.dom_termini:type_name -> common.ProtoHash + 43, // 58: block.ProtoTermini.sub_termini:type_name -> common.ProtoHash 9, // 59: block.ProtoPendingEtxs.header:type_name -> block.ProtoWorkObject 2, // 60: block.ProtoPendingEtxs.outbound_etxs:type_name -> block.ProtoTransactions 9, // 61: block.ProtoPendingEtxsRollup.header:type_name -> block.ProtoWorkObject @@ -3295,20 +3403,21 @@ var file_core_types_proto_block_proto_depIdxs = []int32{ 27, // 63: block.ProtoTxIns.tx_ins:type_name -> block.ProtoTxIn 29, // 64: block.ProtoTxOuts.tx_outs:type_name -> block.ProtoTxOut 28, // 65: block.ProtoTxIn.previous_out_point:type_name -> block.ProtoOutPoint - 41, // 66: block.ProtoOutPoint.hash:type_name -> common.ProtoHash - 41, // 67: block.ProtoOutPointAndDenomination.hash:type_name -> common.ProtoHash + 43, // 66: block.ProtoOutPoint.hash:type_name -> common.ProtoHash + 43, // 67: block.ProtoOutPointAndDenomination.hash:type_name -> common.ProtoHash 30, // 68: block.ProtoAddressOutPoints.out_points:type_name -> 
block.ProtoOutPointAndDenomination 28, // 69: block.ProtoSpentUTXO.outpoint:type_name -> block.ProtoOutPoint 29, // 70: block.ProtoSpentUTXO.sutxo:type_name -> block.ProtoTxOut 32, // 71: block.ProtoSpentUTXOs.sutxos:type_name -> block.ProtoSpentUTXO - 40, // 72: block.ProtoTrimDepths.trim_depths:type_name -> block.ProtoTrimDepths.TrimDepthsEntry + 42, // 72: block.ProtoTrimDepths.trim_depths:type_name -> block.ProtoTrimDepths.TrimDepthsEntry 37, // 73: block.ProtoTokenChoiceSet.token_choice_array:type_name -> block.ProtoTokenChoiceArray 38, // 74: block.ProtoTokenChoiceArray.token_choices:type_name -> block.ProtoTokenChoice - 75, // [75:75] is the sub-list for method output_type - 75, // [75:75] is the sub-list for method input_type - 75, // [75:75] is the sub-list for extension type_name - 75, // [75:75] is the sub-list for extension extendee - 0, // [0:75] is the sub-list for field type_name + 40, // 75: block.ProtoLockups.lockups:type_name -> block.ProtoLockup + 76, // [76:76] is the sub-list for method output_type + 76, // [76:76] is the sub-list for method input_type + 76, // [76:76] is the sub-list for extension type_name + 76, // [76:76] is the sub-list for extension extendee + 0, // [0:76] is the sub-list for field type_name } func init() { file_core_types_proto_block_proto_init() } @@ -3340,7 +3449,7 @@ func file_core_types_proto_block_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_core_types_proto_block_proto_rawDesc, NumEnums: 0, - NumMessages: 41, + NumMessages: 43, NumExtensions: 0, NumServices: 0, }, diff --git a/core/types/proto_block.proto b/core/types/proto_block.proto index df1fcc3768..577784fae6 100644 --- a/core/types/proto_block.proto +++ b/core/types/proto_block.proto @@ -243,4 +243,13 @@ message ProtoTokenChoice { message ProtoBetas { bytes beta0 = 1; bytes beta1 = 2; +} + +message ProtoLockup { + bytes value = 1; + uint64 unlock_height = 2; +} + +message ProtoLockups { + repeated ProtoLockup lockups = 1; } 
\ No newline at end of file diff --git a/core/types/quai_tx.go b/core/types/quai_tx.go index 20f911c955..0b931f1714 100644 --- a/core/types/quai_tx.go +++ b/core/types/quai_tx.go @@ -135,3 +135,8 @@ func (tx *QuaiTx) setTo(to common.Address) { func (tx *QuaiTx) setValue(value *big.Int) { panic("quai TX does not have set value method") } + +type Lockup struct { + UnlockHeight uint64 + Value *big.Int +} diff --git a/core/types/utxo.go b/core/types/utxo.go index 2ead36f234..aa8cc89272 100644 --- a/core/types/utxo.go +++ b/core/types/utxo.go @@ -320,6 +320,9 @@ func (txOut TxOut) ProtoEncode() (*ProtoTxOut, error) { func (txOut *TxOut) ProtoDecode(protoTxOut *ProtoTxOut) error { // check if protoTxOut.Denomination is above the max uint8 value + if protoTxOut.Denomination == nil { + return errors.New("protoTxOut.Denomination is nil") + } if *protoTxOut.Denomination > math.MaxUint8 { return errors.New("protoTxOut.Denomination is above the max uint8 value") } @@ -339,6 +342,17 @@ func NewTxOut(denomination uint8, address []byte, lock *big.Int) *TxOut { } } +type RPCTxIn struct { + PreviousOutPoint OutpointJSON `json:"previousOutPoint"` + PubKey hexutil.Bytes `json:"pubKey"` +} + +type RPCTxOut struct { + Denomination hexutil.Uint `json:"denomination"` + Address common.MixedcaseAddress `json:"address"` + Lock *hexutil.Big `json:"lock"` +} + // UtxoEntry houses details about an individual transaction output in a utxo // view such as whether or not it was contained in a coinbase tx, the height of // the block that contains the tx, whether or not it is spent, its public key diff --git a/interfaces.go b/interfaces.go index 87872bbc7f..f35e2d06fb 100644 --- a/interfaces.go +++ b/interfaces.go @@ -122,6 +122,10 @@ type CallMsg struct { Data []byte // input data, usually an ABI-encoded contract method invocation AccessList types.AccessList // access list + // Support for Qi (UTXO) transaction + TxIn types.TxIns `json:"txIn,omitempty"` + TxOut types.TxOuts `json:"txOut,omitempty"` 
+ TxType uint8 `json:"txType,omitempty"` } // A ContractCaller provides contract calls, essentially transactions that are executed by diff --git a/internal/quaiapi/api.go b/internal/quaiapi/api.go index 45f507cf67..635a00dbe1 100644 --- a/internal/quaiapi/api.go +++ b/internal/quaiapi/api.go @@ -321,9 +321,9 @@ func (s *PublicBlockChainAPI) GetProof(ctx context.Context, address common.Addre // * When blockNr is -1 the chain head is returned. // * When blockNr is -2 the pending chain head is returned. func (s *PublicBlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) { - header, err := s.b.HeaderByNumber(ctx, number) - if header != nil && err == nil { - response := RPCMarshalETHHeader(header.Header()) //TODO: mmtx this function will break once extra fields are stripped from header. + wo, err := s.b.BlockByNumber(ctx, number) + if wo != nil && err == nil { + response := RPCMarshalETHHeader(wo.Header(), wo.WorkObjectHeader()) //TODO: mmtx this function will break once extra fields are stripped from header. if number == rpc.PendingBlockNumber { // Pending header need to nil out a few fields for _, field := range []string{"hash", "nonce", "miner"} { @@ -337,9 +337,9 @@ func (s *PublicBlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc. // GetHeaderByHash returns the requested header by hash. 
func (s *PublicBlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) map[string]interface{} { - header, _ := s.b.HeaderByHash(ctx, hash) - if header != nil { - return RPCMarshalETHHeader(header.Header()) + wo := s.b.GetBlockByHash(hash) + if wo != nil { + return RPCMarshalETHHeader(wo.Header(), wo.WorkObjectHeader()) } return nil } @@ -352,7 +352,7 @@ func (s *PublicBlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.H func (s *PublicBlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { block, err := s.b.BlockByNumber(ctx, number) if block != nil && err == nil { - response, err := s.rpcMarshalBlock(ctx, block, true, fullTx) + response, err := RPCMarshalETHBlock(block, true, fullTx, s.b.NodeLocation()) if err == nil && number == rpc.PendingBlockNumber { // Pending blocks need to nil out a few fields for _, field := range []string{"hash", "nonce", "miner"} { @@ -369,7 +369,7 @@ func (s *PublicBlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.B func (s *PublicBlockChainAPI) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (map[string]interface{}, error) { block, err := s.b.BlockByHash(ctx, hash) if block != nil { - return s.rpcMarshalBlock(ctx, block, true, fullTx) + return RPCMarshalETHBlock(block, true, fullTx, s.b.NodeLocation()) } return nil, err } @@ -573,14 +573,12 @@ func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash // this makes sure resources are cleaned up. 
defer cancel() - if args.Nonce == nil { - internal, err := args.from(b.NodeLocation()).InternalAndQuaiAddress() - if err != nil { - return nil, err - } - nonce := state.GetNonce(internal) - args.Nonce = (*hexutil.Uint64)(&nonce) + internal, err := args.from(b.NodeLocation()).InternalAndQuaiAddress() + if err != nil { + return nil, err } + nonce := state.GetNonce(internal) + args.Nonce = (*hexutil.Uint64)(&nonce) // Ignore provided nonce, reset to correct nonce // Get a new instance of the EVM. msg, err := args.ToMessage(globalGasCap, header.BaseFee(), b.NodeLocation()) @@ -706,14 +704,14 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr hi = uint64(*args.Gas) } else { // Retrieve the block to act as the gas ceiling - block, err := b.BlockByNumberOrHash(ctx, blockNrOrHash) + header, err := b.HeaderByNumberOrHash(ctx, blockNrOrHash) if err != nil { return 0, err } - if block == nil { + if header == nil { return 0, errors.New("block not found") } - hi = block.GasLimit() + hi = header.GasLimit() if hi == 0 { hi = params.GasCeil } @@ -904,22 +902,26 @@ func FormatLogs(logs []vm.StructLog) []StructLogRes { } // RPCMarshalHeader converts the given header to the RPC output . 
-func RPCMarshalETHHeader(head *types.Header) map[string]interface{} { +func RPCMarshalETHHeader(head *types.Header, woHeader *types.WorkObjectHeader) map[string]interface{} { result := map[string]interface{}{ - "number": (*hexutil.Big)(head.Number(common.ZONE_CTX)), - "hash": head.Hash(), - "parentHash": head.ParentHash, - "uncleHash": head.UncleHash, - "evmRoot": head.EVMRoot, + "number": (*hexutil.Big)(woHeader.Number()), + "hash": woHeader.Hash(), + "parentHash": woHeader.ParentHash(), + "nonce": woHeader.Nonce(), + "mixHash": woHeader.MixHash(), + "sha3Uncles": head.UncleHash().String(), + "logsBloom": types.LegacyBloom{}, + "stateRoot": head.EVMRoot(), + "miner": woHeader.PrimaryCoinbase(), + "difficulty": (*hexutil.Big)(woHeader.Difficulty()), "extraData": hexutil.Bytes(head.Extra()), - "size": hexutil.Uint64(head.Size()), "gasLimit": hexutil.Uint64(head.GasLimit()), "gasUsed": hexutil.Uint64(head.GasUsed()), - "baseFee": hexutil.Big(*head.BaseFee()), - "transactionsRoot": head.TxHash, - "receiptsRoot": head.ReceiptHash, + "timestamp": hexutil.Uint64(woHeader.Time()), + "transactionsRoot": head.TxHash().String(), + "receiptsRoot": head.ReceiptHash().String(), + "baseFeePerGas": (*hexutil.Big)(head.BaseFee()), } - return result } @@ -927,7 +929,7 @@ func RPCMarshalETHHeader(head *types.Header) map[string]interface{} { // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. 
func RPCMarshalETHBlock(block *types.WorkObject, inclTx bool, fullTx bool, nodeLocation common.Location) (map[string]interface{}, error) { - fields := RPCMarshalETHHeader(block.Header()) + fields := RPCMarshalETHHeader(block.Header(), block.WorkObjectHeader()) fields["size"] = hexutil.Uint64(block.Size()) if inclTx { @@ -939,7 +941,7 @@ func RPCMarshalETHBlock(block *types.WorkObject, inclTx bool, fullTx bool, nodeL return newRPCTransactionFromBlockHash(block, tx.Hash(), false, nodeLocation), nil } } - txs := block.Transactions() + txs := block.TransactionsWithReceipts() transactions := make([]interface{}, len(txs)) var err error for i, tx := range txs { @@ -959,14 +961,6 @@ func RPCMarshalETHBlock(block *types.WorkObject, inclTx bool, fullTx bool, nodeL return fields, nil } -// rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires -// a `PublicBlockchainAPI`. -func (s *PublicBlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.WorkObject) map[string]interface{} { - fields := RPCMarshalETHHeader(header.Header()) - fields["totalEntropy"] = (*hexutil.Big)(s.b.TotalLogEntropy(header)) - return fields -} - // rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires // a `PublicBlockchainAPI`. 
func (s *PublicBlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.WorkObject, inclTx bool, fullTx bool) (map[string]interface{}, error) { @@ -998,25 +992,14 @@ type RPCTransaction struct { V *hexutil.Big `json:"v,omitempty"` R *hexutil.Big `json:"r,omitempty"` S *hexutil.Big `json:"s,omitempty"` - TxIn []RPCTxIn `json:"inputs,omitempty"` - TxOut []RPCTxOut `json:"outputs,omitempty"` + TxIn []types.RPCTxIn `json:"inputs,omitempty"` + TxOut []types.RPCTxOut `json:"outputs,omitempty"` UTXOSignature hexutil.Bytes `json:"utxoSignature,omitempty"` OriginatingTxHash *common.Hash `json:"originatingTxHash,omitempty"` ETXIndex *hexutil.Uint64 `json:"etxIndex,omitempty"` ETxType *hexutil.Uint64 `json:"etxType,omitempty"` } -type RPCTxIn struct { - PreviousOutPoint types.OutpointJSON `json:"previousOutPoint"` - PubKey hexutil.Bytes `json:"pubKey"` -} - -type RPCTxOut struct { - Denomination hexutil.Uint `json:"denomination"` - Address hexutil.Bytes `json:"address"` - Lock *hexutil.Big `json:"lock"` -} - // newRPCTransaction returns a transaction that will serialize to the RPC // representation, with the given location metadata set (if available). 
func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int, nodeLocation common.Location) *RPCTransaction { @@ -1035,10 +1018,10 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber UTXOSignature: hexutil.Bytes(sig), } for _, txin := range tx.TxIn() { - result.TxIn = append(result.TxIn, RPCTxIn{PreviousOutPoint: types.OutpointJSON{TxHash: txin.PreviousOutPoint.TxHash, Index: hexutil.Uint64(txin.PreviousOutPoint.Index)}, PubKey: hexutil.Bytes(txin.PubKey)}) + result.TxIn = append(result.TxIn, types.RPCTxIn{PreviousOutPoint: types.OutpointJSON{TxHash: txin.PreviousOutPoint.TxHash, Index: hexutil.Uint64(txin.PreviousOutPoint.Index)}, PubKey: hexutil.Bytes(txin.PubKey)}) } for _, txout := range tx.TxOut() { - result.TxOut = append(result.TxOut, RPCTxOut{Denomination: hexutil.Uint(txout.Denomination), Address: hexutil.Bytes(txout.Address), Lock: (*hexutil.Big)(txout.Lock)}) + result.TxOut = append(result.TxOut, types.RPCTxOut{Denomination: hexutil.Uint(txout.Denomination), Address: common.BytesToAddress(txout.Address, nodeLocation).MixedcaseAddress(), Lock: (*hexutil.Big)(txout.Lock)}) } if blockHash != (common.Hash{}) { result.BlockHash = &blockHash @@ -1212,7 +1195,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH nogas := args.Gas == nil // Ensure any missing fields are filled, extract the recipient and input data - if err := args.setDefaults(ctx, b); err != nil { + if err := args.setDefaults(ctx, b, db); err != nil { return nil, 0, nil, err } var to common.Address @@ -1246,7 +1229,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH // and it's convered by the sender only anyway. 
if nogas { args.Gas = nil - if err := args.setDefaults(ctx, b); err != nil { + if err := args.setDefaults(ctx, b, db); err != nil { return nil, 0, nil, err // shouldn't happen, just in case } } @@ -1314,9 +1297,9 @@ func (s *PublicTransactionPoolAPI) GetBlockTransactionCountByHash(ctx context.Co } // GetTransactionByBlockNumberAndIndex returns the transaction for the given block number and index. -func (s *PublicTransactionPoolAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint, nodeLocation common.Location) *RPCTransaction { +func (s *PublicTransactionPoolAPI) GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) *RPCTransaction { if block, _ := s.b.BlockByNumber(ctx, blockNr); block != nil { - return newRPCTransactionFromBlockIndex(block, uint64(index), false, nodeLocation) + return newRPCTransactionFromBlockIndex(block, uint64(index), false, s.b.NodeLocation()) } return nil } @@ -1444,7 +1427,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, ha "cumulativeGasUsed": hexutil.Uint64(receipt.CumulativeGasUsed), "contractAddress": nil, "logs": receipt.Logs, - "logsBloom": receipt.Bloom, + "logsBloom": receipt.Bloom.ToLegacyBloom(), "type": hexutil.Uint(tx.Type()), } @@ -1661,6 +1644,10 @@ func (api *PrivateDebugAPI) ChaindbCompact() error { return nil } +func NewPublicNetAPI(networkVersion uint64) *PublicNetAPI { + return &PublicNetAPI{networkVersion} +} + // PublicNetAPI offers network related RPC methods type PublicNetAPI struct { networkVersion uint64 diff --git a/internal/quaiapi/backend.go b/internal/quaiapi/backend.go index 3c4be44f3e..7676c19360 100644 --- a/internal/quaiapi/backend.go +++ b/internal/quaiapi/backend.go @@ -66,7 +66,9 @@ type Backend interface { StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.WorkObject, error) StateAndHeaderByNumberOrHash(ctx context.Context, 
blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.WorkObject, error) AddressOutpoints(ctx context.Context, address common.Address) ([]*types.OutpointAndDenomination, error) + AddressLockups(ctx context.Context, address common.Address) ([]*types.Lockup, error) GetOutpointsByAddressAndRange(ctx context.Context, address common.Address, start, end uint32) ([]*types.OutpointAndDenomination, error) + GetLockupsByAddressAndRange(ctx context.Context, address common.Address, start, end uint32) ([]*types.Lockup, error) UTXOsByAddress(ctx context.Context, address common.Address) ([]*types.UtxoEntry, error) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.WorkObject, parent *types.WorkObject, vmConfig *vm.Config) (*vm.EVM, func() error, error) @@ -194,6 +196,12 @@ func GetAPIs(apiBackend Backend) []rpc.API { Version: "1.0", Service: NewPrivateDebugAPI(apiBackend), }, + { + Namespace: "net", + Version: "1.0", + Service: NewPublicNetAPI(apiBackend.ChainConfig().ChainID.Uint64()), + Public: true, + }, } if nodeCtx == common.ZONE_CTX { apis = append(apis, rpc.API{ @@ -217,7 +225,7 @@ func GetAPIs(apiBackend Backend) []rpc.API { apis = append(apis, rpc.API{ Namespace: "workshare", Version: "1.0", - Service: NewPublicWorkSharesAPI(apis[6].Service.(*PublicTransactionPoolAPI), apiBackend), + Service: NewPublicWorkSharesAPI(apis[7].Service.(*PublicTransactionPoolAPI), apiBackend), Public: true, }) } diff --git a/internal/quaiapi/quai_api.go b/internal/quaiapi/quai_api.go index 79ee5d41fe..dd0bc4f2d4 100644 --- a/internal/quaiapi/quai_api.go +++ b/internal/quaiapi/quai_api.go @@ -33,6 +33,7 @@ import ( "github.com/dominant-strategies/go-quai/crypto" "github.com/dominant-strategies/go-quai/log" "github.com/dominant-strategies/go-quai/metrics_config" + "github.com/dominant-strategies/go-quai/params" "github.com/dominant-strategies/go-quai/rpc" 
"github.com/dominant-strategies/go-quai/trie" "google.golang.org/protobuf/proto" @@ -126,24 +127,22 @@ func (s *PublicBlockChainQuaiAPI) GetBalance(ctx context.Context, address common return (*hexutil.Big)(big.NewInt(0)), errors.New("qi balance query is only supported for the current block") } - utxos, err := s.b.UTXOsByAddress(ctx, addr) + utxos, err := s.b.AddressOutpoints(ctx, addr) if utxos == nil || err != nil { - return nil, err + return (*hexutil.Big)(big.NewInt(0)), err } if len(utxos) == 0 { return (*hexutil.Big)(big.NewInt(0)), nil } - var balance *big.Int + balance := big.NewInt(0) for _, utxo := range utxos { - denomination := utxo.Denomination - value := types.Denominations[denomination] - if balance == nil { - balance = new(big.Int).Set(value) - } else { - balance.Add(balance, value) + if utxo.Lock != nil && currHeader.Number(nodeCtx).Cmp(utxo.Lock) < 0 { + continue } + value := types.Denominations[utxo.Denomination] + balance.Add(balance, value) } return (*hexutil.Big)(balance), nil } else { @@ -155,7 +154,51 @@ func (s *PublicBlockChainQuaiAPI) GetBalance(ctx context.Context, address common } } +func (s *PublicBlockChainQuaiAPI) GetLockedBalance(ctx context.Context, address common.MixedcaseAddress) (*hexutil.Big, error) { + nodeCtx := s.b.NodeCtx() + if nodeCtx != common.ZONE_CTX { + return nil, errors.New("getLockedBalance call can only be made in zone chain") + } + if !s.b.ProcessingState() { + return nil, errors.New("getLockedBalance call can only be made on chain processing the state") + } + + addr := common.Bytes20ToAddress(address.Address().Bytes20(), s.b.NodeLocation()) + if addr.IsInQiLedgerScope() { + currHeader := s.b.CurrentHeader() + utxos, err := s.b.AddressOutpoints(ctx, addr) + if utxos == nil || err != nil { + return (*hexutil.Big)(big.NewInt(0)), err + } + if len(utxos) == 0 { + return (*hexutil.Big)(big.NewInt(0)), nil + } + lockedBalance := big.NewInt(0) + for _, utxo := range utxos { + if utxo.Lock != nil &&
currHeader.Number(nodeCtx).Cmp(utxo.Lock) < 0 { + value := types.Denominations[utxo.Denomination] + lockedBalance.Add(lockedBalance, value) + } + } + return (*hexutil.Big)(lockedBalance), nil + } else if addr.IsInQuaiLedgerScope() { + lockups, err := s.b.AddressLockups(ctx, addr) + if lockups == nil || err != nil { + return (*hexutil.Big)(big.NewInt(0)), err + } + lockedBalance := big.NewInt(0) + for _, lockup := range lockups { + lockedBalance.Add(lockedBalance, lockup.Value) + } + return (*hexutil.Big)(lockedBalance), nil + } + return nil, nil +} + func (s *PublicBlockChainQuaiAPI) GetOutpointsByAddress(ctx context.Context, address common.Address) ([]interface{}, error) { + if address.IsInQuaiLedgerScope() { + return nil, fmt.Errorf("address %s is in Quai ledger scope", address.Hex()) + } outpoints, err := s.b.AddressOutpoints(ctx, address) if err != nil { return nil, err @@ -165,6 +208,9 @@ func (s *PublicBlockChainQuaiAPI) GetOutpointsByAddress(ctx context.Context, add if outpoint == nil { continue } + if rawdb.GetUTXO(s.b.Database(), outpoint.TxHash, outpoint.Index) == nil { + continue + } lock := big.NewInt(0) if outpoint.Lock != nil { lock = outpoint.Lock @@ -181,6 +227,50 @@ func (s *PublicBlockChainQuaiAPI) GetOutpointsByAddress(ctx context.Context, add return jsonOutpoints, nil } +func (s *PublicBlockChainQuaiAPI) GetLockupsByAddress(ctx context.Context, address common.Address) ([]interface{}, error) { + if address.IsInQiLedgerScope() { + return nil, fmt.Errorf("address %s is in Qi ledger scope", address.Hex()) + } + lockups, err := s.b.AddressLockups(ctx, address) + if err != nil { + return nil, err + } + jsonLockups := make([]interface{}, 0, len(lockups)) + for _, lockup := range lockups { + jsonLockup := map[string]interface{}{ + "value": hexutil.Big(*lockup.Value), + "unlockHeight": hexutil.Uint64(lockup.UnlockHeight), + } + jsonLockups = append(jsonLockups, jsonLockup) + } + return jsonLockups, nil +} + +func (s *PublicBlockChainQuaiAPI) 
GetLockupsByAddressAndRange(ctx context.Context, address common.Address, start, end hexutil.Uint64) ([]interface{}, error) { + if address.IsInQiLedgerScope() { + return nil, fmt.Errorf("address %s is in Qi ledger scope", address.Hex()) + } + if start > end { + return nil, fmt.Errorf("start is greater than end") + } + if uint32(end)-uint32(start) > maxOutpointsRange { + return nil, fmt.Errorf("range is too large, max range is %d", maxOutpointsRange) + } + lockups, err := s.b.GetLockupsByAddressAndRange(ctx, address, uint32(start), uint32(end)) + if err != nil { + return nil, err + } + jsonLockups := make([]interface{}, 0, len(lockups)) + for _, lockup := range lockups { + jsonLockup := map[string]interface{}{ + "value": hexutil.Big(*lockup.Value), + "unlockHeight": hexutil.Uint64(lockup.UnlockHeight), + } + jsonLockups = append(jsonLockups, jsonLockup) + } + return jsonLockups, nil +} + func (s *PublicBlockChainQuaiAPI) GetOutPointsByAddressAndRange(ctx context.Context, address common.Address, start, end hexutil.Uint64) (map[string][]interface{}, error) { if start > end { return nil, fmt.Errorf("start is greater than end") @@ -188,6 +278,9 @@ func (s *PublicBlockChainQuaiAPI) GetOutPointsByAddressAndRange(ctx context.Cont if uint32(end)-uint32(start) > maxOutpointsRange { return nil, fmt.Errorf("range is too large, max range is %d", maxOutpointsRange) } + if address.IsInQuaiLedgerScope() { + return nil, fmt.Errorf("address %s is in Quai ledger scope", address.Hex()) + } outpoints, err := s.b.GetOutpointsByAddressAndRange(ctx, address, uint32(start), uint32(end)) if err != nil { return nil, err @@ -197,6 +290,9 @@ func (s *PublicBlockChainQuaiAPI) GetOutPointsByAddressAndRange(ctx context.Cont if outpoint == nil { continue } + if rawdb.GetUTXO(s.b.Database(), outpoint.TxHash, outpoint.Index) == nil { + continue + } lock := big.NewInt(0) if outpoint.Lock != nil { lock = outpoint.Lock @@ -212,6 +308,227 @@ func (s *PublicBlockChainQuaiAPI) 
GetOutPointsByAddressAndRange(ctx context.Cont return txHashToOutpointsJson, nil } +func (s *PublicBlockChainQuaiAPI) GetOutpointDeltasForAddressesInRange(ctx context.Context, addresses []common.Address, from, to common.Hash) (map[string]map[string]map[string][]interface{}, error) { + if s.b.NodeCtx() != common.ZONE_CTX { + return nil, errors.New("getOutpointDeltasForAddressesInRange can only be called in a zone chain") + } + nodeCtx := common.ZONE_CTX + if len(addresses) == 0 { + return nil, fmt.Errorf("addresses cannot be empty") + } + blockFrom := s.b.BlockOrCandidateByHash(from) + if blockFrom == nil { + return nil, fmt.Errorf("block %s not found", from.Hex()) + } + blockTo := s.b.BlockOrCandidateByHash(to) + if blockTo == nil { + return nil, fmt.Errorf("block %s not found", to.Hex()) + } + if blockFrom.NumberU64(nodeCtx) > blockTo.NumberU64(nodeCtx) { + return nil, fmt.Errorf("from block number is greater than to block number") + } + if uint32(blockTo.NumberU64(nodeCtx))-uint32(blockFrom.NumberU64(nodeCtx)) > maxOutpointsRange { + return nil, fmt.Errorf("range is too large, max range is %d", maxOutpointsRange) + } + addressMap := make(map[common.AddressBytes]struct{}) + for _, address := range addresses { + addressMap[address.Bytes20()] = struct{}{} + } + addressToCreatedDeletedToTxHashToOutputs := make(map[string]map[string]map[string][]interface{}) + for _, address := range addresses { + addressToCreatedDeletedToTxHashToOutputs[address.String()] = make(map[string]map[string][]interface{}) + addressToCreatedDeletedToTxHashToOutputs[address.String()]["created"] = make(map[string][]interface{}) + addressToCreatedDeletedToTxHashToOutputs[address.String()]["deleted"] = make(map[string][]interface{}) + } + commonBlock := blockFrom + if blockFrom.Hash() != rawdb.ReadCanonicalHash(s.b.Database(), blockFrom.NumberU64(nodeCtx)) || blockTo.Hash() != rawdb.ReadCanonicalHash(s.b.Database(), blockTo.NumberU64(nodeCtx)) { + // One (or both) of the blocks are not canonical, 
find common ancestor + commonAncestor, err := rawdb.FindCommonAncestor(s.b.Database(), blockFrom, blockTo, nodeCtx) + if err != nil { + return nil, err + } + if commonAncestor == nil { + return nil, fmt.Errorf("no common ancestor found") + } + commonAncestorBlock := s.b.BlockOrCandidateByHash(commonAncestor.Hash()) + if commonAncestorBlock == nil { + return nil, fmt.Errorf("block %s not found", to.Hex()) + } + commonBlock = commonAncestorBlock + } + currentBlock := blockTo + for currentBlock.Hash() != commonBlock.Hash() { // Get deltas from blockTo to common ancestor + err := GetDeltas(s, currentBlock, addressMap, addressToCreatedDeletedToTxHashToOutputs) + if err != nil { + return nil, err + } + currentBlock = s.b.BlockOrCandidateByHash(currentBlock.ParentHash(nodeCtx)) + if currentBlock == nil { + return nil, errors.New("parent block not found while walking from blockTo to common ancestor") + } + } + if commonBlock.Hash() != blockFrom.Hash() { // Get deltas from blockFrom to common ancestor + currentBlock = blockFrom + for currentBlock.Hash() != commonBlock.Hash() { + err := GetDeltas(s, currentBlock, addressMap, addressToCreatedDeletedToTxHashToOutputs) + if err != nil { + return nil, err + } + currentBlock = s.b.BlockOrCandidateByHash(currentBlock.ParentHash(nodeCtx)) + if currentBlock == nil { + return nil, errors.New("parent block not found while walking from blockFrom to common ancestor") + } + } + } + err := GetDeltas(s, commonBlock, addressMap, addressToCreatedDeletedToTxHashToOutputs) // Get deltas from common ancestor + if err != nil { + return nil, err + } + + return addressToCreatedDeletedToTxHashToOutputs, nil +} + +func GetDeltas(s *PublicBlockChainQuaiAPI, currentBlock *types.WorkObject, addressMap map[common.AddressBytes]struct{}, addressToCreatedDeletedToTxHashToOutputs map[string]map[string]map[string][]interface{}) error { + nodeCtx := common.ZONE_CTX + // Grab spent UTXOs for this block + // Eventually spent UTXOs should be pruned, so this data might not be
available + sutxos, err := rawdb.ReadSpentUTXOs(s.b.Database(), currentBlock.Hash()) + if err != nil { + return err + } + trimmedUtxos, err := rawdb.ReadTrimmedUTXOs(s.b.Database(), currentBlock.Hash()) + if err != nil { + return err + } + sutxos = append(sutxos, trimmedUtxos...) + for _, sutxo := range sutxos { + if _, ok := addressMap[common.AddressBytes(sutxo.Address)]; ok { + lock := big.NewInt(0) + if sutxo.Lock != nil { + lock = sutxo.Lock + } + addressToCreatedDeletedToTxHashToOutputs[common.AddressBytes(sutxo.Address).String()]["deleted"][sutxo.TxHash.String()] = + append(addressToCreatedDeletedToTxHashToOutputs[common.AddressBytes(sutxo.Address).String()]["deleted"][sutxo.TxHash.String()], map[string]interface{}{ + "index": hexutil.Uint64(sutxo.Index), + "denomination": hexutil.Uint64(sutxo.Denomination), + "lock": hexutil.Big(*lock), + }) + } + } + + for _, tx := range currentBlock.Transactions() { + if tx.Type() == types.QiTxType { + for i, out := range tx.TxOut() { + if common.BytesToAddress(out.Address, common.Location{0, 0}).IsInQuaiLedgerScope() { + // This is a conversion output + continue + } + if _, ok := addressMap[common.AddressBytes(out.Address)]; !ok { + continue + } + lock := big.NewInt(0) + if out.Lock != nil { + lock = out.Lock + } + addressToCreatedDeletedToTxHashToOutputs[common.AddressBytes(out.Address).String()]["created"][tx.Hash().String()] = + append(addressToCreatedDeletedToTxHashToOutputs[common.AddressBytes(out.Address).String()]["created"][tx.Hash().String()], map[string]interface{}{ + "index": hexutil.Uint64(i), + "denomination": hexutil.Uint64(out.Denomination), + "lock": hexutil.Big(*lock), + }) + } + } else if tx.Type() == types.ExternalTxType && tx.EtxType() == types.CoinbaseType && tx.To().IsInQiLedgerScope() { + if len(tx.Data()) == 0 { + continue + } + if _, ok := addressMap[common.AddressBytes(tx.To().Bytes20())]; !ok { + continue + } + lockupByte := tx.Data()[0] + // After the BigSporkFork the minimum conversion period 
changes to 7200 blocks + var lockup *big.Int + if lockupByte == 0 { + if currentBlock.NumberU64(nodeCtx) < params.GoldenAgeForkNumberV1 { + lockup = new(big.Int).SetUint64(params.OldConversionLockPeriod) + } else { + lockup = new(big.Int).SetUint64(params.NewConversionLockPeriod) + } + } else { + lockup = new(big.Int).SetUint64(params.LockupByteToBlockDepth[lockupByte]) + } + lockup.Add(lockup, currentBlock.Number(nodeCtx)) + value := params.CalculateCoinbaseValueWithLockup(tx.Value(), lockupByte) + denominations := misc.FindMinDenominations(value) + outputIndex := uint16(0) + // Iterate over the denominations in descending order + for denomination := types.MaxDenomination; denomination >= 0; denomination-- { + // If the denomination count is zero, skip it + if denominations[uint8(denomination)] == 0 { + continue + } + for j := uint64(0); j < denominations[uint8(denomination)]; j++ { + if outputIndex >= types.MaxOutputIndex { + // No more gas, the rest of the denominations are lost but the tx is still valid + break + } + + addressToCreatedDeletedToTxHashToOutputs[tx.To().String()]["created"][tx.Hash().String()] = + append(addressToCreatedDeletedToTxHashToOutputs[tx.To().String()]["created"][tx.Hash().String()], map[string]interface{}{ + "index": hexutil.Uint64(outputIndex), + "denomination": hexutil.Uint64(uint8(denomination)), + "lock": hexutil.Big(*lockup), + }) + outputIndex++ + } + } + } else if tx.Type() == types.ExternalTxType && tx.EtxType() == types.ConversionType && tx.To().IsInQiLedgerScope() { + if _, ok := addressMap[common.AddressBytes(tx.To().Bytes20())]; !ok { + continue + } + var lockup *big.Int + if currentBlock.NumberU64(nodeCtx) < params.GoldenAgeForkNumberV1 { + lockup = new(big.Int).SetUint64(params.OldConversionLockPeriod) + } else { + lockup = new(big.Int).SetUint64(params.NewConversionLockPeriod) + } + lockup.Add(lockup, currentBlock.Number(nodeCtx)) + value := tx.Value() + txGas := tx.Gas() + if txGas < params.TxGas { + continue + } + txGas 
-= params.TxGas + denominations := misc.FindMinDenominations(value) + outputIndex := uint16(0) + // Iterate over the denominations in descending order + for denomination := types.MaxDenomination; denomination >= 0; denomination-- { + // If the denomination count is zero, skip it + if denominations[uint8(denomination)] == 0 { + continue + } + for j := uint64(0); j < denominations[uint8(denomination)]; j++ { + if txGas < params.CallValueTransferGas || outputIndex >= types.MaxOutputIndex { + // No more gas, the rest of the denominations are lost but the tx is still valid + break + } + txGas -= params.CallValueTransferGas + // the ETX hash is guaranteed to be unique + + addressToCreatedDeletedToTxHashToOutputs[tx.To().String()]["created"][tx.Hash().String()] = + append(addressToCreatedDeletedToTxHashToOutputs[tx.To().String()]["created"][tx.Hash().String()], map[string]interface{}{ + "index": hexutil.Uint64(outputIndex), + "denomination": hexutil.Uint64(uint8(denomination)), + "lock": hexutil.Big(*lockup), + }) + + outputIndex++ + } + } + } + } + return nil +} + func (s *PublicBlockChainQuaiAPI) GetUTXO(ctx context.Context, txHash common.Hash, index hexutil.Uint64) (map[string]interface{}, error) { utxo := rawdb.GetUTXO(s.b.Database(), txHash, uint16(index)) if utxo == nil { @@ -508,6 +825,44 @@ func (s *PublicBlockChainQuaiAPI) EstimateGas(ctx context.Context, args Transact if blockNrOrHash != nil { bNrOrHash = *blockNrOrHash } + + if args.from(s.b.NodeLocation()).IsInQuaiLedgerScope() && args.To != nil && args.To.IsInQiLedgerScope() { + // Conversion transaction + var header *types.WorkObject + var err error + if blockNr, ok := blockNrOrHash.Number(); ok { + if blockNr == rpc.LatestBlockNumber { + header = s.b.CurrentHeader() + } else { + header, err = s.b.HeaderByNumber(ctx, rpc.BlockNumber(blockNr)) + } + } else if hash, ok := blockNrOrHash.Hash(); ok { + header, err = s.b.HeaderByHash(ctx, hash) + } else { + return 0, errors.New("invalid block number or hash") + } 
+ if err != nil { + return 0, err + } + estimatedQiAmount := misc.QuaiToQi(header, args.Value.ToInt()) + usedGas := uint64(0) + + usedGas += params.TxGas + denominations := misc.FindMinDenominations(estimatedQiAmount) + outputIndex := uint16(0) + // Iterate over the denominations in descending order + for denomination := types.MaxDenomination; denomination >= 0; denomination-- { + // If the denomination count is zero, skip it + if denominations[uint8(denomination)] == 0 { + continue + } + for j := uint64(0); j < denominations[uint8(denomination)]; j++ { + usedGas += params.CallValueTransferGas + outputIndex++ + } + } + return hexutil.Uint64(usedGas), nil + } switch args.TxType { case types.QiTxType: block, err := s.b.BlockByNumberOrHash(ctx, bNrOrHash) diff --git a/internal/quaiapi/transaction_args.go b/internal/quaiapi/transaction_args.go index 84e75907c1..eacda80a0e 100644 --- a/internal/quaiapi/transaction_args.go +++ b/internal/quaiapi/transaction_args.go @@ -26,6 +26,7 @@ import ( "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/common/hexutil" "github.com/dominant-strategies/go-quai/common/math" + "github.com/dominant-strategies/go-quai/core/state" "github.com/dominant-strategies/go-quai/core/types" "github.com/dominant-strategies/go-quai/log" "github.com/dominant-strategies/go-quai/rpc" @@ -52,9 +53,9 @@ type TransactionArgs struct { ChainID *hexutil.Big `json:"chainId,omitempty"` // Support for Qi (UTXO) transaction - TxIn types.TxIns `json:"txIn,omitempty"` - TxOut types.TxOuts `json:"txOut,omitempty"` - TxType uint8 `json:"txType,omitempty"` + TxIn []types.RPCTxIn `json:"txIn,omitempty"` + TxOut []types.RPCTxOut `json:"txOut,omitempty"` + TxType uint8 `json:"txType,omitempty"` } // from retrieves the transaction sender address. @@ -77,7 +78,7 @@ func (arg *TransactionArgs) data() []byte { } // setDefaults fills in default values for unspecified tx fields. 
-func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { +func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend, db *state.StateDB) error { head := b.CurrentHeader() if args.MinerTip == nil { @@ -97,13 +98,13 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { if args.Value == nil { args.Value = new(hexutil.Big) } - if args.Nonce == nil { - nonce, err := b.GetPoolNonce(ctx, args.from(b.NodeLocation())) - if err != nil { - return err - } - args.Nonce = (*hexutil.Uint64)(&nonce) + internal, err := args.from(b.NodeLocation()).InternalAddress() + if err != nil { + return err } + nonce := db.GetNonce(internal) + args.Nonce = (*hexutil.Uint64)(&nonce) // Ignore provided nonce, reset to correct nonce + if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) { return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`) } @@ -167,12 +168,8 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int, no gasPrice *big.Int minerTip *big.Int ) - // User specified max fee (or none), use those - gasPrice = new(big.Int) - if args.GasPrice != nil { - gasPrice = args.GasPrice.ToInt() - } - minerTip = gasPrice + gasPrice = new(big.Int).Set(common.Big0) // Skip base fee check in state_transition.go + minerTip = gasPrice // Skip base fee check in state_transition.go value := new(big.Int) if args.Value != nil { value = args.Value.ToInt() @@ -182,6 +179,7 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int, no if args.AccessList != nil { accessList = *args.AccessList } + msg := types.NewMessage(addr, args.To, uint64(*args.Nonce), value, gas, gasPrice, minerTip, data, accessList, false) return msg, nil } @@ -194,11 +192,34 @@ func (args *TransactionArgs) CalculateQiTxGas(qiScalingFactor float64, location if len(args.TxIn) == 0 || len(args.TxOut) == 0 { return 0, errors.New("Qi 
transaction must have at least one input and one output")
+	} else if len(args.TxIn) > types.MaxOutputIndex {
+		return 0, fmt.Errorf("Qi transaction has too many inputs: %d", len(args.TxIn))
+	}
+	ins := make([]types.TxIn, len(args.TxIn))
+	outs := make([]types.TxOut, len(args.TxOut))
+	for i, in := range args.TxIn {
+		if in.PreviousOutPoint.Index > types.MaxOutputIndex {
+			return 0, fmt.Errorf("Qi transaction has an input with an index too large: %d", in.PreviousOutPoint.Index)
+		}
+		ins[i] = types.TxIn{
+			PreviousOutPoint: types.OutPoint{
+				TxHash: in.PreviousOutPoint.TxHash,
+				Index:  uint16(in.PreviousOutPoint.Index),
+			},
+			PubKey: in.PubKey,
+		}
+	}
+	for i, out := range args.TxOut {
+		outs[i] = types.TxOut{
+			Denomination: uint8(out.Denomination),
+			Address:      out.Address.Address().Bytes(),
+			Lock:         out.Lock.ToInt(),
+		}
+	}
 	qiTx := &types.QiTx{
-		TxIn:  args.TxIn,
-		TxOut: args.TxOut,
+		TxIn:  ins,
+		TxOut: outs,
 	}
 	tx := types.NewTx(qiTx)
diff --git a/node/rpcstack.go b/node/rpcstack.go
index 3a01ae5d59..5e6d84de28 100644
--- a/node/rpcstack.go
+++ b/node/rpcstack.go
@@ -499,6 +499,7 @@ func RegisterApis(apis []rpc.API, modules []string, srv *rpc.Server, exposeAll b
 	for _, module := range modules {
 		allowList[module] = true
 	}
+	allowList["net"] = true
 	// Register all the APIs exposed by the services
 	for _, api := range apis {
 		if exposeAll || allowList[api.Namespace] || (len(allowList) == 0 && api.Public) {
diff --git a/quai/api_backend.go b/quai/api_backend.go
index 6041cc7a1d..18503a2e63 100644
--- a/quai/api_backend.go
+++ b/quai/api_backend.go
@@ -214,10 +214,18 @@ func (b *QuaiAPIBackend) GetOutpointsByAddressAndRange(ctx context.Context, addr
 	return b.quai.core.GetOutpointsByAddressAndRange(address, start, end)
 }
 
+func (b *QuaiAPIBackend) GetLockupsByAddressAndRange(ctx context.Context, address common.Address, start, end uint32) ([]*types.Lockup, error) {
+	return b.quai.core.GetLockupsByAddressAndRange(address, start, end)
+}
+
 func (b *QuaiAPIBackend)
AddressOutpoints(ctx context.Context, address common.Address) ([]*types.OutpointAndDenomination, error) { return b.quai.core.GetOutpointsByAddress(address) } +func (b *QuaiAPIBackend) AddressLockups(ctx context.Context, address common.Address) ([]*types.Lockup, error) { + return b.quai.core.GetLockupsByAddress(address) +} + func (b *QuaiAPIBackend) UTXOsByAddress(ctx context.Context, address common.Address) ([]*types.UtxoEntry, error) { return b.quai.core.GetUTXOsByAddress(address) } diff --git a/quai/filters/api.go b/quai/filters/api.go index 2f69ac1724..a306ed23dc 100644 --- a/quai/filters/api.go +++ b/quai/filters/api.go @@ -40,6 +40,7 @@ import ( const ( c_pendingHeaderChSize = 20 + MaxFilterRange = 10000 ) // filter is a helper struct that holds meta information over the filter type @@ -535,6 +536,11 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([ if crit.ToBlock != nil { end = crit.ToBlock.Int64() } + if end != rpc.LatestBlockNumber.Int64() && begin > end { + return nil, errors.New("fromBlock must be less than or equal to toBlock") + } else if end != rpc.LatestBlockNumber.Int64() && end-begin > MaxFilterRange { + return nil, fmt.Errorf("filter range must be less than or equal to %d", MaxFilterRange) + } // Construct the range filter filter = NewRangeFilter(api.backend, begin, end, addresses, crit.Topics, api.backend.Logger()) } diff --git a/quai/filters/filter.go b/quai/filters/filter.go index f61d14e791..10d0e51827 100644 --- a/quai/filters/filter.go +++ b/quai/filters/filter.go @@ -19,7 +19,9 @@ package filters import ( "context" "errors" + "fmt" "math/big" + "sync" "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/core" @@ -122,8 +124,8 @@ func newFilter(backend Backend, addresses []common.Address, topics [][]common.Ha } } -// Logs searches the blockchain for matching log entries, returning all from the -// first block that contains matches, updating the start of the filter 
accordingly. +// Logs searches the blockchain for matching log entries by manually checking +// each block within the specified range using 8 goroutines for parallel processing. func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { // If we're doing singleton block filtering, execute and return if f.block != (common.Hash{}) { @@ -136,7 +138,8 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { } return f.blockLogs(ctx, workObject) } - // Figure out the limits of the filter range + + // Determine the limits of the filter range header, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) if header == nil { return nil, nil @@ -150,25 +153,92 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { if f.end == -1 { end = head } - // Gather all indexed logs, and finish with non indexed ones + if end > uint64(f.begin) && end-uint64(f.begin) > MaxFilterRange { + return nil, fmt.Errorf("filter range exceeds maximum limit of %d blocks", MaxFilterRange) + } + + // Prepare to collect logs from goroutines var ( - logs []*types.Log - err error + logs []*types.Log + logsLock sync.Mutex + wg sync.WaitGroup + errChan = make(chan error, 8) ) - size, sections := f.backend.BloomStatus() - if indexed := sections * size; indexed > uint64(f.begin) { - if indexed > end { - logs, err = f.indexedLogs(ctx, end) - } else { - logs, err = f.indexedLogs(ctx, indexed-1) - } + + totalBlocks := int64(end) - f.begin + 1 + if totalBlocks <= 0 { + return nil, nil + } + + // Adjust the number of goroutines if total blocks are fewer than 8 + numGoroutines := 8 + if totalBlocks < int64(numGoroutines) { + numGoroutines = int(totalBlocks) + } + + blocksPerGoroutine := totalBlocks / int64(numGoroutines) + remainderBlocks := totalBlocks % int64(numGoroutines) + + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + + // Calculate the start and end blocks for this goroutine + startBlock := f.begin + 
int64(i)*blocksPerGoroutine + endBlock := startBlock + blocksPerGoroutine - 1 + if i == numGoroutines-1 { + // The last goroutine handles any remaining blocks + endBlock += remainderBlocks + } + + // Collect logs from startBlock to endBlock + var localLogs []*types.Log + for blockNum := startBlock; blockNum <= endBlock; blockNum++ { + // Check for context cancellation + select { + case <-ctx.Done(): + errChan <- ctx.Err() + return + default: + } + + workObject, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(blockNum)) + if err != nil { + errChan <- err + return + } + if workObject == nil { + continue + } + + found, err := f.blockLogs(ctx, workObject) + if err != nil { + errChan <- err + return + } + localLogs = append(localLogs, found...) + } + + // Append localLogs to the main logs slice safely + logsLock.Lock() + logs = append(logs, localLogs...) + logsLock.Unlock() + }(i) + } + + // Wait for all goroutines to finish + wg.Wait() + close(errChan) + + // Check for errors from goroutines + for err := range errChan { if err != nil { return logs, err } } - rest, err := f.unindexedLogs(ctx, end) - logs = append(logs, rest...) - return logs, err + + return logs, nil } // indexedLogs returns the logs matching the filter criteria based on the bloom @@ -239,18 +309,13 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e // blockLogs returns the logs matching the filter criteria within a single block. func (f *Filter) blockLogs(ctx context.Context, workObject *types.WorkObject) (logs []*types.Log, err error) { - // Get block bloom from the database - bloom, err := f.backend.GetBloom(workObject.Hash()) + + found, err := f.checkMatches(ctx, workObject) if err != nil { return logs, err } - if bloomFilter(*bloom, f.addresses, f.topics) { - found, err := f.checkMatches(ctx, workObject) - if err != nil { - return logs, err - } - logs = append(logs, found...) - } + logs = append(logs, found...) 
+ return logs, nil } diff --git a/quai/filters/filter_system.go b/quai/filters/filter_system.go index 772ad0c51a..fb34732788 100644 --- a/quai/filters/filter_system.go +++ b/quai/filters/filter_system.go @@ -221,6 +221,11 @@ func (es *EventSystem) SubscribeLogs(crit quai.FilterQuery, logs chan []*types.L to = rpc.BlockNumber(crit.ToBlock.Int64()) } + // Enforce max range of 10,000 blocks + if from >= 0 && to >= 0 && to > from && to-from > MaxFilterRange { + return nil, fmt.Errorf("invalid from and to block combination: block range > %d", MaxFilterRange) + } + // only interested in pending logs if from == rpc.PendingBlockNumber && to == rpc.PendingBlockNumber { return es.subscribePendingLogs(crit, logs), nil diff --git a/quaiclient/ethclient/ethclient.go b/quaiclient/ethclient/ethclient.go index c74d7d1f90..c6cfef5e04 100644 --- a/quaiclient/ethclient/ethclient.go +++ b/quaiclient/ethclient/ethclient.go @@ -188,7 +188,7 @@ func (tx *rpcTransaction) UnmarshalJSON(msg []byte) error { // TransactionByHash returns the transaction with the given hash. func (ec *Client) TransactionByHash(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error) { var json *rpcTransaction - err = ec.c.CallContext(ctx, &json, "eth_getTransactionByHash", hash) + err = ec.c.CallContext(ctx, &json, "quai_getTransactionByHash", hash) if err != nil { return nil, false, err } else if json == nil { @@ -256,7 +256,7 @@ func (ec *Client) TransactionInBlock(ctx context.Context, blockHash common.Hash, // Note that the receipt is not available for pending transactions. 
func (ec *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) { var r *types.Receipt - err := ec.c.CallContext(ctx, &r, "eth_getTransactionReceipt", txHash) + err := ec.c.CallContext(ctx, &r, "quai_getTransactionReceipt", txHash) if err == nil { if r == nil { return nil, quai.NotFound @@ -328,7 +328,7 @@ func (ec *Client) NetworkID(ctx context.Context) (*big.Int, error) { // The block number can be nil, in which case the balance is taken from the latest known block. func (ec *Client) BalanceAt(ctx context.Context, account common.MixedcaseAddress, blockNumber *big.Int) (*big.Int, error) { var result hexutil.Big - err := ec.c.CallContext(ctx, &result, "eth_getBalance", account.Original(), toBlockNumArg(blockNumber)) + err := ec.c.CallContext(ctx, &result, "quai_getBalance", account.Original(), toBlockNumArg(blockNumber)) return (*big.Int)(&result), err } @@ -401,7 +401,7 @@ func (ec *Client) PendingCodeAt(ctx context.Context, account common.MixedcaseAdd // This is the nonce that should be used for the next transaction. func (ec *Client) PendingNonceAt(ctx context.Context, account common.MixedcaseAddress) (uint64, error) { var result hexutil.Uint64 - err := ec.c.CallContext(ctx, &result, "eth_getTransactionCount", account.Original(), "pending") + err := ec.c.CallContext(ctx, &result, "quai_getTransactionCount", account.Original(), "pending") return uint64(result), err } @@ -432,7 +432,7 @@ func (ec *Client) GetPendingHeader(ctx context.Context) (*types.WorkObject, erro // blocks might not be available. 
func (ec *Client) CallContract(ctx context.Context, msg quai.CallMsg, blockNumber *big.Int) ([]byte, error) { var hex hexutil.Bytes - err := ec.c.CallContext(ctx, &hex, "eth_call", toCallArg(msg), toBlockNumArg(blockNumber)) + err := ec.c.CallContext(ctx, &hex, "quai_call", toCallArg(msg), toBlockNumArg(blockNumber)) if err != nil { return nil, err } @@ -466,7 +466,7 @@ func (ec *Client) SuggestGasPrice(ctx context.Context) (*big.Int, error) { // but it should provide a basis for setting a reasonable default. func (ec *Client) EstimateGas(ctx context.Context, msg quai.CallMsg) (uint64, error) { var hex hexutil.Uint64 - err := ec.c.CallContext(ctx, &hex, "eth_estimateGas", toCallArg(msg)) + err := ec.c.CallContext(ctx, &hex, "quai_estimateGas", toCallArg(msg)) if err != nil { return 0, err } @@ -588,6 +588,23 @@ func toCallArg(msg quai.CallMsg) interface{} { if msg.GasPrice != nil { arg["gasPrice"] = (*hexutil.Big)(msg.GasPrice) } + if msg.TxType != 0 { + arg["txType"] = msg.TxType + } + txIns := make([]types.RPCTxIn, 0, len(msg.TxIn)) + txOuts := make([]types.RPCTxOut, 0, len(msg.TxOut)) + for _, txin := range msg.TxIn { + txIns = append(txIns, types.RPCTxIn{PreviousOutPoint: types.OutpointJSON{TxHash: txin.PreviousOutPoint.TxHash, Index: hexutil.Uint64(txin.PreviousOutPoint.Index)}, PubKey: hexutil.Bytes(txin.PubKey)}) + } + for _, txout := range msg.TxOut { + txOuts = append(txOuts, types.RPCTxOut{Denomination: hexutil.Uint(txout.Denomination), Address: common.BytesToAddress(txout.Address, common.Location{0, 0}).MixedcaseAddress(), Lock: (*hexutil.Big)(txout.Lock)}) + } + if msg.TxIn != nil { + arg["txIn"] = msg.TxIn + } + if msg.TxOut != nil { + arg["txOut"] = msg.TxOut + } return arg }