From e207a5722386b65e50b007c925ddd52decb241c0 Mon Sep 17 00:00:00 2001 From: gop Date: Mon, 11 Nov 2024 15:11:59 -0600 Subject: [PATCH 1/9] Storing the best node set on stop and retrieving on launch --- cmd/utils/hc.pb.go | 228 ++++++++++++++++++++++++++ cmd/utils/hc.proto | 17 ++ cmd/utils/hierarchical_coordinator.go | 149 ++++++++++++----- core/headerchain.go | 2 +- params/config.go | 17 +- quai/backend.go | 1 + 6 files changed, 364 insertions(+), 50 deletions(-) create mode 100644 cmd/utils/hc.pb.go create mode 100644 cmd/utils/hc.proto diff --git a/cmd/utils/hc.pb.go b/cmd/utils/hc.pb.go new file mode 100644 index 0000000000..88f4fbcb19 --- /dev/null +++ b/cmd/utils/hc.pb.go @@ -0,0 +1,228 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.35.1 +// protoc v5.28.2 +// source: cmd/utils/hc.proto + +package utils + +import ( + common "github.com/dominant-strategies/go-quai/common" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ProtoNode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hash *common.ProtoHash `protobuf:"bytes,1,opt,name=hash,proto3,oneof" json:"hash,omitempty"` + Number [][]byte `protobuf:"bytes,2,rep,name=number,proto3" json:"number,omitempty"` + Location *common.ProtoLocation `protobuf:"bytes,3,opt,name=location,proto3,oneof" json:"location,omitempty"` + Entropy []byte `protobuf:"bytes,4,opt,name=entropy,proto3,oneof" json:"entropy,omitempty"` +} + +func (x *ProtoNode) Reset() { + *x = ProtoNode{} + mi := &file_cmd_utils_hc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ProtoNode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtoNode) ProtoMessage() {} + +func (x *ProtoNode) ProtoReflect() protoreflect.Message { + mi := &file_cmd_utils_hc_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtoNode.ProtoReflect.Descriptor instead. 
+func (*ProtoNode) Descriptor() ([]byte, []int) { + return file_cmd_utils_hc_proto_rawDescGZIP(), []int{0} +} + +func (x *ProtoNode) GetHash() *common.ProtoHash { + if x != nil { + return x.Hash + } + return nil +} + +func (x *ProtoNode) GetNumber() [][]byte { + if x != nil { + return x.Number + } + return nil +} + +func (x *ProtoNode) GetLocation() *common.ProtoLocation { + if x != nil { + return x.Location + } + return nil +} + +func (x *ProtoNode) GetEntropy() []byte { + if x != nil { + return x.Entropy + } + return nil +} + +type ProtoNodeSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NodeSet map[string]*ProtoNode `protobuf:"bytes,1,rep,name=node_set,json=nodeSet,proto3" json:"node_set,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ProtoNodeSet) Reset() { + *x = ProtoNodeSet{} + mi := &file_cmd_utils_hc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ProtoNodeSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtoNodeSet) ProtoMessage() {} + +func (x *ProtoNodeSet) ProtoReflect() protoreflect.Message { + mi := &file_cmd_utils_hc_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtoNodeSet.ProtoReflect.Descriptor instead. 
+func (*ProtoNodeSet) Descriptor() ([]byte, []int) { + return file_cmd_utils_hc_proto_rawDescGZIP(), []int{1} +} + +func (x *ProtoNodeSet) GetNodeSet() map[string]*ProtoNode { + if x != nil { + return x.NodeSet + } + return nil +} + +var File_cmd_utils_hc_proto protoreflect.FileDescriptor + +var file_cmd_utils_hc_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x6d, 0x64, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x73, 0x2f, 0x68, 0x63, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x68, 0x63, 0x1a, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xc8, 0x01, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4e, 0x6f, 0x64, + 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, + 0x73, 0x68, 0x48, 0x00, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x16, 0x0a, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x01, + 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1d, 0x0a, + 0x07, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x02, + 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x22, 0x93, + 0x01, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 
0x12, + 0x38, 0x0a, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x68, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4e, 0x6f, 0x64, 0x65, + 0x53, 0x65, 0x74, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x1a, 0x49, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, + 0x65, 0x53, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x68, 0x63, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x6e, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x71, 0x75, 0x61, 0x69, 0x2f, 0x63, + 0x6d, 0x64, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cmd_utils_hc_proto_rawDescOnce sync.Once + file_cmd_utils_hc_proto_rawDescData = file_cmd_utils_hc_proto_rawDesc +) + +func file_cmd_utils_hc_proto_rawDescGZIP() []byte { + file_cmd_utils_hc_proto_rawDescOnce.Do(func() { + file_cmd_utils_hc_proto_rawDescData = protoimpl.X.CompressGZIP(file_cmd_utils_hc_proto_rawDescData) + }) + return file_cmd_utils_hc_proto_rawDescData +} + +var file_cmd_utils_hc_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_cmd_utils_hc_proto_goTypes = []any{ + (*ProtoNode)(nil), // 0: hc.ProtoNode + (*ProtoNodeSet)(nil), // 1: hc.ProtoNodeSet + nil, // 2: hc.ProtoNodeSet.NodeSetEntry + (*common.ProtoHash)(nil), // 3: common.ProtoHash + (*common.ProtoLocation)(nil), // 4: common.ProtoLocation +} +var file_cmd_utils_hc_proto_depIdxs = []int32{ + 3, // 0: 
hc.ProtoNode.hash:type_name -> common.ProtoHash + 4, // 1: hc.ProtoNode.location:type_name -> common.ProtoLocation + 2, // 2: hc.ProtoNodeSet.node_set:type_name -> hc.ProtoNodeSet.NodeSetEntry + 0, // 3: hc.ProtoNodeSet.NodeSetEntry.value:type_name -> hc.ProtoNode + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_cmd_utils_hc_proto_init() } +func file_cmd_utils_hc_proto_init() { + if File_cmd_utils_hc_proto != nil { + return + } + file_cmd_utils_hc_proto_msgTypes[0].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cmd_utils_hc_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cmd_utils_hc_proto_goTypes, + DependencyIndexes: file_cmd_utils_hc_proto_depIdxs, + MessageInfos: file_cmd_utils_hc_proto_msgTypes, + }.Build() + File_cmd_utils_hc_proto = out.File + file_cmd_utils_hc_proto_rawDesc = nil + file_cmd_utils_hc_proto_goTypes = nil + file_cmd_utils_hc_proto_depIdxs = nil +} diff --git a/cmd/utils/hc.proto b/cmd/utils/hc.proto new file mode 100644 index 0000000000..7569970420 --- /dev/null +++ b/cmd/utils/hc.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package hc; +option go_package = "github.com/dominant-strategies/go-quai/cmd/utils"; + +import "common/proto_common.proto"; + +message ProtoNode { + optional common.ProtoHash hash = 1; + repeated bytes number = 2; + optional common.ProtoLocation location = 3; + optional bytes entropy = 4; +} + +message ProtoNodeSet { + map node_set = 1; +} diff --git a/cmd/utils/hierarchical_coordinator.go b/cmd/utils/hierarchical_coordinator.go index a0b43ff5f8..39b58c6f09 100644 --- 
a/cmd/utils/hierarchical_coordinator.go +++ b/cmd/utils/hierarchical_coordinator.go @@ -36,6 +36,7 @@ const ( var ( c_currentExpansionNumberKey = []byte("cexp") + c_bestNodeKey = []byte("best") ) type Node struct { @@ -45,10 +46,61 @@ type Node struct { entropy *big.Int } +func (ch *Node) ProtoEncode() *ProtoNode { + protoNumber := make([][]byte, common.HierarchyDepth) + for i, num := range ch.number { + protoNumber[i] = num.Bytes() + } + protoNode := &ProtoNode{ + Hash: ch.hash.ProtoEncode(), + Number: protoNumber, + Location: ch.location.ProtoEncode(), + Entropy: ch.entropy.Bytes(), + } + return protoNode +} + +func (ch *Node) ProtoDecode(protoNode *ProtoNode) { + hash := &common.Hash{} + hash.ProtoDecode(protoNode.GetHash()) + ch.hash = *hash + + number := make([]*big.Int, common.HierarchyDepth) + for i, num := range protoNode.GetNumber() { + number[i] = new(big.Int).SetBytes(num) + } + ch.number = number + + location := &common.Location{} + location.ProtoDecode(protoNode.GetLocation()) + ch.location = *location + + ch.entropy = new(big.Int).SetBytes(protoNode.GetEntropy()) +} + type NodeSet struct { nodes map[string]Node } +func (ns *NodeSet) ProtoEncode() *ProtoNodeSet { + protoNodeSet := &ProtoNodeSet{} + protoNodeSet.NodeSet = make(map[string]*ProtoNode) + + for loc, node := range ns.nodes { + node := node.ProtoEncode() + protoNodeSet.NodeSet[loc] = node + } + return protoNodeSet +} + +func (ns *NodeSet) ProtoDecode(protoNodeSet *ProtoNodeSet) { + for loc, protoNode := range protoNodeSet.NodeSet { + node := &Node{} + node.ProtoDecode(protoNode) + ns.nodes[loc] = *node + } +} + func (ch *Node) Empty() bool { return ch.hash == common.Hash{} && ch.location.Equal(common.Location{}) && ch.entropy == nil } @@ -137,6 +189,8 @@ func (hc *HierarchicalCoordinator) InitPendingHeaders() { } } hc.Add(new(big.Int).SetUint64(0), nodeSet, hc.pendingHeaders) + + hc.LoadBestNodeSet() } func (hc *HierarchicalCoordinator) Add(entropy *big.Int, node NodeSet, newPendingHeaders 
*PendingHeaders) { @@ -208,11 +262,14 @@ func (ns *NodeSet) Extendable(wo *types.WorkObject, order int) bool { func (ns *NodeSet) Entropy(numRegions int, numZones int) *big.Int { entropy := new(big.Int) - entropy.Add(entropy, ns.nodes[common.Location{}.Name()].entropy) + primeEntropy := ns.nodes[common.Location{}.Name()].entropy + entropy.Add(entropy, primeEntropy) for i := 0; i < numRegions; i++ { - entropy.Add(entropy, ns.nodes[common.Location{byte(i)}.Name()].entropy) + regionEntropy := ns.nodes[common.Location{byte(i)}.Name()].entropy + entropy.Add(entropy, regionEntropy) for j := 0; j < numZones; j++ { - entropy.Add(entropy, ns.nodes[common.Location{byte(i), byte(j)}.Name()].entropy) + zoneEntropy := ns.nodes[common.Location{byte(i), byte(j)}.Name()].entropy + entropy.Add(entropy, zoneEntropy) } } @@ -430,6 +487,7 @@ func (hc *HierarchicalCoordinator) Stop() { for _, chainEventSub := range hc.chainSubs { chainEventSub.Unsubscribe() } + hc.StoreBestNodeSet() hc.expansionSub.Unsubscribe() hc.db.Close() hc.wg.Wait() @@ -602,44 +660,6 @@ func (hc *HierarchicalCoordinator) ChainEventLoop(chainEvent chan core.ChainEven for { select { case head := <-chainEvent: - // If this is the first block we have after a restart, then we can - // add this block into the node set directly - // Since on startup we initialize the pending headers cache with the - // genesis block, we can check and see if we are in that state - // We can do that by checking the length of the pendding headers order - // cache length is 1 - if len(hc.pendingHeaders.order) == 1 { - // create a nodeset on this block - nodeSet := NodeSet{ - nodes: make(map[string]Node), - } - - //Initialize for prime - backend := hc.GetBackend(common.Location{}) - entropy := backend.TotalLogEntropy(head.Block) - newNode := Node{ - hash: head.Block.ParentHash(common.PRIME_CTX), - number: head.Block.NumberArray(), - location: common.Location{}, - entropy: entropy, - } - nodeSet.nodes[common.Location{}.Name()] = newNode - - 
regionLocation := common.Location{byte(head.Block.Location().Region())} - backend = hc.GetBackend(regionLocation) - newNode.hash = head.Block.ParentHash(common.REGION_CTX) - newNode.location = regionLocation - newNode.entropy = entropy - nodeSet.nodes[regionLocation.Name()] = newNode - - zoneLocation := head.Block.Location() - backend = hc.GetBackend(zoneLocation) - newNode.hash = head.Block.ParentHash(common.ZONE_CTX) - newNode.location = zoneLocation - newNode.entropy = entropy - nodeSet.nodes[zoneLocation.Name()] = newNode - hc.Add(entropy, nodeSet, hc.pendingHeaders) - } go hc.ReapplicationLoop(head) go hc.ComputeMapPending(head) @@ -1321,3 +1341,50 @@ func (hc *HierarchicalCoordinator) GetBackendForLocationAndOrder(location common } return nil } + +func (hc *HierarchicalCoordinator) StoreBestNodeSet() { + + log.Global.Info("Storing the best node set on stop") + + bestNode, exists := hc.pendingHeaders.collection.Get(hc.bestEntropy.String()) + if !exists { + log.Global.Error("best entropy node set doesnt exist in the pending headers collection") + } + + protoBestNode := bestNode.ProtoEncode() + + data, err := proto.Marshal(protoBestNode) + if err != nil { + log.Global.Error("Error marshalling best node, err: ", err) + return + } + + err = hc.db.Put(c_bestNodeKey, data, nil) + if err != nil { + log.Global.Error("Error storing the best node key, err: ", err) + return + } +} + +func (hc *HierarchicalCoordinator) LoadBestNodeSet() { + data, err := hc.db.Get(c_bestNodeKey, nil) + if err != nil { + log.Global.Error("Error loading the best node, err: ", err) + return + } + + protoNodeSet := &ProtoNodeSet{} + err = proto.Unmarshal(data, protoNodeSet) + if err != nil { + log.Global.Error("Error unmarshalling the proto node set, err: ", err) + return + } + + nodeSet := NodeSet{} + nodeSet.nodes = make(map[string]Node) + nodeSet.ProtoDecode(protoNodeSet) + + numRegions, numZones := common.GetHierarchySizeForExpansionNumber(hc.currentExpansionNumber) + hc.bestEntropy = 
nodeSet.Entropy(int(numRegions), int(numZones)) + hc.Add(hc.bestEntropy, nodeSet, hc.pendingHeaders) +} diff --git a/core/headerchain.go b/core/headerchain.go index da1408817e..aff75c4aa3 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -1224,7 +1224,7 @@ func (hc *HeaderChain) ComputeExpansionNumber(parent *types.WorkObject) (uint8, } // If the Prime Terminus is genesis the expansion number is the genesis expansion number - if hc.IsGenesisHash(primeTerminusHash) && hc.NodeLocation().Equal(common.Location{0, 0}) { + if hc.IsGenesisHash(primeTerminusHash) && hc.NodeLocation().Equal(common.Location{0, 0}) || hc.config.StartingExpansionNumber != 0 { return primeTerminus.ExpansionNumber(), nil } else { // check if the prime terminus is the block where the threshold count diff --git a/params/config.go b/params/config.go index dbb147558d..e0b527320d 100644 --- a/params/config.go +++ b/params/config.go @@ -112,9 +112,9 @@ var ( // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. 
- AllProgpowProtocolChanges = &ChainConfig{big.NewInt(1337), "progpow", new(Blake3powConfig), new(ProgpowConfig), common.Location{}, common.Hash{}, false} + AllProgpowProtocolChanges = &ChainConfig{big.NewInt(1337), "progpow", new(Blake3powConfig), new(ProgpowConfig), common.Location{}, common.Hash{}, false, 0} - TestChainConfig = &ChainConfig{big.NewInt(1), "progpow", new(Blake3powConfig), new(ProgpowConfig), common.Location{}, common.Hash{}, false} + TestChainConfig = &ChainConfig{big.NewInt(1), "progpow", new(Blake3powConfig), new(ProgpowConfig), common.Location{}, common.Hash{}, false, 0} TestRules = TestChainConfig.Rules(new(big.Int)) ) @@ -126,12 +126,13 @@ var ( type ChainConfig struct { ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection // Various consensus engines - ConsensusEngine string - Blake3Pow *Blake3powConfig `json:"blake3pow,omitempty"` - Progpow *ProgpowConfig `json:"progpow,omitempty"` - Location common.Location - DefaultGenesisHash common.Hash - IndexAddressUtxos bool + ConsensusEngine string + Blake3Pow *Blake3powConfig `json:"blake3pow,omitempty"` + Progpow *ProgpowConfig `json:"progpow,omitempty"` + Location common.Location + DefaultGenesisHash common.Hash + IndexAddressUtxos bool + StartingExpansionNumber uint64 } // SetLocation sets the location on the chain config diff --git a/quai/backend.go b/quai/backend.go index ce21140b0e..b46dde4557 100644 --- a/quai/backend.go +++ b/quai/backend.go @@ -181,6 +181,7 @@ func New(stack *node.Node, p2p NetworkingAPI, config *quaiconfig.Config, nodeCtx chainConfig.Location = config.NodeLocation // TODO: See why this is necessary chainConfig.DefaultGenesisHash = config.DefaultGenesisHash chainConfig.IndexAddressUtxos = config.IndexAddressUtxos + chainConfig.StartingExpansionNumber = startingExpansionNumber logger.WithFields(log.Fields{ "Ctx": nodeCtx, "NodeLocation": config.NodeLocation, From ca64903c7e132542f37bc40d992b247d043e0cb5 Mon Sep 17 
00:00:00 2001 From: gop Date: Tue, 5 Nov 2024 16:23:30 -0600 Subject: [PATCH 2/9] Added a second goldenage fork number --- params/protocol_params.go | 1 + 1 file changed, 1 insertion(+) diff --git a/params/protocol_params.go b/params/protocol_params.go index febe4f23b0..c44baee828 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -197,6 +197,7 @@ var ( const ( GoldenAgeForkNumberV1 = 180000 + GoldenAgeForkNumberV2 = 586000 GoldenAgeForkGraceNumber = 4000 ) From eb0e6f785b3468ab2945b092663c88466e0058ad Mon Sep 17 00:00:00 2001 From: gop Date: Mon, 4 Nov 2024 11:45:01 -0600 Subject: [PATCH 3/9] Trimming based on fixed depth --- consensus/blake3pow/consensus.go | 313 ++++++++----------------------- consensus/progpow/consensus.go | 303 ++++++++---------------------- core/chain_indexer.go | 2 - core/rawdb/accessors_chain.go | 63 ------- core/rawdb/schema.go | 10 - core/types/utxo.go | 18 +- params/protocol_params.go | 2 - 7 files changed, 165 insertions(+), 546 deletions(-) diff --git a/consensus/blake3pow/consensus.go b/consensus/blake3pow/consensus.go index 72c959e84a..d99ea8c6e7 100644 --- a/consensus/blake3pow/consensus.go +++ b/consensus/blake3pow/consensus.go @@ -5,7 +5,6 @@ import ( "math/big" "runtime" "runtime/debug" - "sort" "sync" "time" @@ -17,9 +16,9 @@ import ( "github.com/dominant-strategies/go-quai/core/rawdb" "github.com/dominant-strategies/go-quai/core/state" "github.com/dominant-strategies/go-quai/core/types" + "github.com/dominant-strategies/go-quai/crypto/multiset" "github.com/dominant-strategies/go-quai/ethdb" "github.com/dominant-strategies/go-quai/log" - "github.com/dominant-strategies/go-quai/crypto/multiset" "github.com/dominant-strategies/go-quai/params" "github.com/dominant-strategies/go-quai/trie" "google.golang.org/protobuf/proto" @@ -657,85 +656,36 @@ func (blake3pow *Blake3pow) Finalize(chain consensus.ChainHeaderReader, batch et } utxoSetSize -= uint64(len(utxosDelete)) - trimDepths := types.TrimDepths - if 
utxoSetSize > params.SoftMaxUTXOSetSize/2 { - var err error - trimDepths, err = rawdb.ReadTrimDepths(chain.Database(), header.ParentHash(nodeCtx)) - if err != nil || trimDepths == nil { - blake3pow.logger.Errorf("Failed to read trim depths for block %s: %+v", header.ParentHash(nodeCtx).String(), err) - trimDepths = make(map[uint8]uint64, len(types.TrimDepths)) - for denomination, depth := range types.TrimDepths { // copy the default trim depths - trimDepths[denomination] = depth - } - } - if UpdateTrimDepths(trimDepths, utxoSetSize) { - blake3pow.logger.Infof("Updated trim depths at height %d new depths: %+v", header.NumberU64(nodeCtx), trimDepths) - } - if !setRoots { - rawdb.WriteTrimDepths(batch, header.Hash(), trimDepths) - } - } - start := time.Now() - collidingKeys, err := rawdb.ReadCollidingKeys(chain.Database(), header.ParentHash(nodeCtx)) - if err != nil { - blake3pow.logger.Errorf("Failed to read colliding keys for block %s: %+v", header.ParentHash(nodeCtx).String(), err) - } - newCollidingKeys := make([][]byte, 0) trimmedUtxos := make([]*types.SpentUtxoEntry, 0) - var wg sync.WaitGroup - var lock sync.Mutex - for denomination, depth := range trimDepths { - if denomination <= types.MaxTrimDenomination && header.NumberU64(nodeCtx) > depth+params.MinimumTrimDepth { - wg.Add(1) - go func(denomination uint8, depth uint64) { - defer func() { - if r := recover(); r != nil { - blake3pow.logger.WithFields(log.Fields{ - "error": r, - "stacktrace": string(debug.Stack()), - }).Error("Go-Quai Panicked") - } - }() - nextBlockToTrim := rawdb.ReadCanonicalHash(chain.Database(), header.NumberU64(nodeCtx)-depth) - collisions := TrimBlock(chain, batch, denomination, true, header.NumberU64(nodeCtx)-depth, nextBlockToTrim, &utxosDelete, &trimmedUtxos, nil, &utxoSetSize, !setRoots, &lock, blake3pow.logger) // setRoots is false when we are processing the block - if len(collisions) > 0 { - lock.Lock() - newCollidingKeys = append(newCollidingKeys, collisions...) 
- lock.Unlock() - } - wg.Done() - }(denomination, depth) + + if header.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 { + start := time.Now() + var wg sync.WaitGroup + var lock sync.Mutex + for denomination, depth := range types.TrimDepths { + if denomination <= types.MaxTrimDenomination && header.NumberU64(nodeCtx) > depth { + wg.Add(1) + go func(denomination uint8, depth uint64) { + defer func() { + if r := recover(); r != nil { + blake3pow.logger.WithFields(log.Fields{ + "error": r, + "stacktrace": string(debug.Stack()), + }).Error("Go-Quai Panicked") + } + }() + nextBlockToTrim := rawdb.ReadCanonicalHash(chain.Database(), header.NumberU64(nodeCtx)-depth) + TrimBlock(chain, batch, denomination, header.NumberU64(nodeCtx)-depth, nextBlockToTrim, &utxosDelete, &trimmedUtxos, &utxoSetSize, !setRoots, &lock, blake3pow.logger) // setRoots is false when we are processing the block + wg.Done() + }(denomination, depth) + } + } + wg.Wait() + if len(trimmedUtxos) > 0 { + blake3pow.logger.Infof("Trimmed %d UTXOs from db in %s", len(trimmedUtxos), common.PrettyDuration(time.Since(start))) } - } - if len(collidingKeys) > 0 { - wg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - blake3pow.logger.WithFields(log.Fields{ - "error": r, - "stacktrace": string(debug.Stack()), - }).Error("Go-Quai Panicked") - } - }() - // Trim colliding/duplicate keys here - an optimization could be to do this above in parallel with the other trims - collisions := TrimBlock(chain, batch, 0, false, 0, common.Hash{}, &utxosDelete, &trimmedUtxos, collidingKeys, &utxoSetSize, !setRoots, &lock, blake3pow.logger) - if len(collisions) > 0 { - lock.Lock() - newCollidingKeys = append(newCollidingKeys, collisions...) 
- lock.Unlock() - } - wg.Done() - }() - } - wg.Wait() - if len(trimmedUtxos) > 0 { - blake3pow.logger.Infof("Trimmed %d UTXOs from db in %s", len(trimmedUtxos), common.PrettyDuration(time.Since(start))) - } - if !setRoots { - rawdb.WriteTrimmedUTXOs(batch, header.Hash(), trimmedUtxos) - if len(newCollidingKeys) > 0 { - rawdb.WriteCollidingKeys(batch, header.Hash(), newCollidingKeys) + if !setRoots { + rawdb.WriteTrimmedUTXOs(batch, header.Hash(), trimmedUtxos) } } for _, hash := range utxosCreate { @@ -757,173 +707,68 @@ func (blake3pow *Blake3pow) Finalize(chain consensus.ChainHeaderReader, batch et // TrimBlock trims all UTXOs of a given denomination that were created in a given block. // In the event of an attacker intentionally creating too many 9-byte keys that collide, we return the colliding keys to be trimmed in the next block. -func TrimBlock(chain consensus.ChainHeaderReader, batch ethdb.Batch, denomination uint8, checkDenom bool, blockHeight uint64, blockHash common.Hash, utxosDelete *[]common.Hash, trimmedUtxos *[]*types.SpentUtxoEntry, collidingKeys [][]byte, utxoSetSize *uint64, deleteFromDb bool, lock *sync.Mutex, logger *log.Logger) [][]byte { - utxosCreated, err := rawdb.ReadPrunedUTXOKeys(chain.Database(), blockHeight) - if err != nil { - logger.Errorf("Failed to read pruned UTXOs for block %d: %+v", blockHeight, err) - } +func TrimBlock(chain consensus.ChainHeaderReader, batch ethdb.Batch, denomination uint8, blockHeight uint64, blockHash common.Hash, utxosDelete *[]common.Hash, trimmedUtxos *[]*types.SpentUtxoEntry, utxoSetSize *uint64, deleteFromDb bool, lock *sync.Mutex, logger *log.Logger) { + utxosCreated, _ := rawdb.ReadCreatedUTXOKeys(chain.Database(), blockHash) if len(utxosCreated) == 0 { - // This should almost never happen, but we need to handle it - utxosCreated, err = rawdb.ReadCreatedUTXOKeys(chain.Database(), blockHash) - if err != nil { - logger.Errorf("Failed to read created UTXOs for block %d: %+v", blockHeight, err) - } - 
logger.Infof("Reading non-pruned UTXOs for block %d", blockHeight) - for i, key := range utxosCreated { - if len(key) == rawdb.UtxoKeyWithDenominationLength { - if key[len(key)-1] > types.MaxTrimDenomination { - // Don't keep it if the denomination is not trimmed - // The keys are sorted in order of denomination, so we can break here - break - } - key[rawdb.PrunedUtxoKeyWithDenominationLength+len(rawdb.UtxoPrefix)-1] = key[len(key)-1] // place the denomination at the end of the pruned key (11th byte will become 9th byte) - } - // Reduce key size to 9 bytes and cut off the prefix - key = key[len(rawdb.UtxoPrefix) : rawdb.PrunedUtxoKeyWithDenominationLength+len(rawdb.UtxoPrefix)] - utxosCreated[i] = key - } + logger.Infof("UTXOs created in block %d: %d", blockHeight, len(utxosCreated)) + return } logger.Infof("UTXOs created in block %d: %d", blockHeight, len(utxosCreated)) - if len(collidingKeys) > 0 { - logger.Infof("Colliding keys: %d", len(collidingKeys)) - utxosCreated = append(utxosCreated, collidingKeys...) - sort.Slice(utxosCreated, func(i, j int) bool { - return utxosCreated[i][len(utxosCreated[i])-1] < utxosCreated[j][len(utxosCreated[j])-1] - }) - } - newCollisions := make([][]byte, 0) - duplicateKeys := make(map[[36]byte]bool) // cannot use rawdb.UtxoKeyLength for map as it's not const // Start by grabbing all the UTXOs created in the block (that are still in the UTXO set) for _, key := range utxosCreated { - if len(key) != rawdb.PrunedUtxoKeyWithDenominationLength { - continue - } - if checkDenom { - if key[len(key)-1] != denomination { - if key[len(key)-1] > denomination { - break // The keys are stored in order of denomination, so we can stop checking here - } else { - continue - } - } else { - key = append(rawdb.UtxoPrefix, key...) 
// prepend the db prefix - key = key[:len(key)-1] // remove the denomination byte - } - } - // Check key in database - i := 0 - it := chain.Database().NewIterator(key, nil) - for it.Next() { - data := it.Value() - if len(data) == 0 { - logger.Infof("Empty key found, denomination: %d", denomination) - continue - } - // Check if the key is a duplicate - if len(it.Key()) == rawdb.UtxoKeyLength { - key36 := [36]byte(it.Key()) - if duplicateKeys[key36] { - continue - } else { - duplicateKeys[key36] = true - } - } else { - logger.Errorf("Invalid key length: %d", len(it.Key())) - continue - } - utxoProto := new(types.ProtoTxOut) - if err := proto.Unmarshal(data, utxoProto); err != nil { - logger.Errorf("Failed to unmarshal ProtoTxOut: %+v data: %+v key: %+v", err, data, key) - continue - } - utxo := new(types.UtxoEntry) - if err := utxo.ProtoDecode(utxoProto); err != nil { - logger.WithFields(log.Fields{ - "key": key, - "data": data, - "err": err, - }).Error("Invalid utxo Proto") - continue - } - if checkDenom && utxo.Denomination != denomination { - continue - } - txHash, index, err := rawdb.ReverseUtxoKey(it.Key()) - if err != nil { - logger.WithField("err", err).Error("Failed to parse utxo key") + if key[len(key)-1] != denomination { + if key[len(key)-1] > denomination { + break // The keys are stored in order of denomination, so we can stop checking here + } else { continue } - lock.Lock() - *utxosDelete = append(*utxosDelete, types.UTXOHash(txHash, index, utxo)) - if deleteFromDb { - batch.Delete(it.Key()) - *trimmedUtxos = append(*trimmedUtxos, &types.SpentUtxoEntry{OutPoint: types.OutPoint{txHash, index}, UtxoEntry: utxo}) - } - *utxoSetSize-- - lock.Unlock() - i++ - if i >= types.MaxTrimCollisionsPerKeyPerBlock { - // This will rarely ever happen, but if it does, we should continue trimming this key in the next block - logger.WithField("blockHeight", blockHeight).Error("MaxTrimCollisionsPerBlock exceeded") - newCollisions = append(newCollisions, key) - break - } + 
} else { + key = key[:len(key)-1] // remove the denomination byte } - it.Release() - } - return newCollisions -} -func UpdateTrimDepths(trimDepths map[uint8]uint64, utxoSetSize uint64) bool { - switch { - case utxoSetSize > params.SoftMaxUTXOSetSize/2 && trimDepths[255] == 0: // 50% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth - (depth / 10) // decrease lifespan of this denomination by 10% - } - trimDepths[255] = 1 // level 1 - case utxoSetSize > params.SoftMaxUTXOSetSize-(params.SoftMaxUTXOSetSize/4) && trimDepths[255] == 1: // 75% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth - (depth / 5) // decrease lifespan of this denomination by an additional 20% - } - trimDepths[255] = 2 // level 2 - case utxoSetSize > params.SoftMaxUTXOSetSize-(params.SoftMaxUTXOSetSize/10) && trimDepths[255] == 2: // 90% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth - (depth / 2) // decrease lifespan of this denomination by an additional 50% - } - trimDepths[255] = 3 // level 3 - case utxoSetSize > params.SoftMaxUTXOSetSize && trimDepths[255] == 3: - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth - (depth / 2) // decrease lifespan of this denomination by an additional 50% - } - trimDepths[255] = 4 // level 4 - - // Resets - case utxoSetSize <= params.SoftMaxUTXOSetSize/2 && trimDepths[255] == 1: // Below 50% full - for denomination, depth := range types.TrimDepths { // reset to the default trim depths - trimDepths[denomination] = depth - } - trimDepths[255] = 0 // level 0 - case utxoSetSize <= params.SoftMaxUTXOSetSize-(params.SoftMaxUTXOSetSize/4) && trimDepths[255] == 2: // Below 75% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth + (depth / 5) // increase lifespan of this denomination by 20% - } - trimDepths[255] = 1 // level 1 - case utxoSetSize <= 
params.SoftMaxUTXOSetSize-(params.SoftMaxUTXOSetSize/10) && trimDepths[255] == 3: // Below 90% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth + (depth / 2) // increase lifespan of this denomination by 50% - } - trimDepths[255] = 2 // level 2 - case utxoSetSize <= params.SoftMaxUTXOSetSize && trimDepths[255] == 4: // Below 100% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth + (depth / 2) // increase lifespan of this denomination by 50% - } - trimDepths[255] = 3 // level 3 - default: - return false + data, _ := chain.Database().Get(key) + if len(data) == 0 { + logger.Infof("Empty key found, denomination: %d", denomination) + continue + } + utxoProto := new(types.ProtoTxOut) + if err := proto.Unmarshal(data, utxoProto); err != nil { + logger.Errorf("Failed to unmarshal ProtoTxOut: %+v data: %+v key: %+v", err, data, key) + continue + } + utxo := new(types.UtxoEntry) + if err := utxo.ProtoDecode(utxoProto); err != nil { + logger.WithFields(log.Fields{ + "key": key, + "data": data, + "err": err, + }).Error("Invalid utxo Proto") + continue + } + if utxo.Denomination != denomination { + continue + } + // Only the coinbase and conversion txs are allowed to have lockups that + // is non zero + if utxo.Lock.Sign() == 0 { + continue + } + txHash, index, err := rawdb.ReverseUtxoKey(key) + if err != nil { + logger.WithField("err", err).Error("Failed to parse utxo key") + continue + } + lock.Lock() + *utxosDelete = append(*utxosDelete, types.UTXOHash(txHash, index, utxo)) + if deleteFromDb { + batch.Delete(key) + *trimmedUtxos = append(*trimmedUtxos, &types.SpentUtxoEntry{OutPoint: types.OutPoint{txHash, index}, UtxoEntry: utxo}) + } + *utxoSetSize-- + lock.Unlock() } - return true } // FinalizeAndAssemble implements consensus.Engine, accumulating the block and diff --git a/consensus/progpow/consensus.go b/consensus/progpow/consensus.go index 5ac3cbdf0b..74fcc9aee8 100644 --- 
a/consensus/progpow/consensus.go +++ b/consensus/progpow/consensus.go @@ -6,7 +6,6 @@ import ( "math/big" "runtime" "runtime/debug" - "sort" "sync" "time" @@ -18,9 +17,9 @@ import ( "github.com/dominant-strategies/go-quai/core/rawdb" "github.com/dominant-strategies/go-quai/core/state" "github.com/dominant-strategies/go-quai/core/types" + "github.com/dominant-strategies/go-quai/crypto/multiset" "github.com/dominant-strategies/go-quai/ethdb" "github.com/dominant-strategies/go-quai/log" - "github.com/dominant-strategies/go-quai/crypto/multiset" "github.com/dominant-strategies/go-quai/params" "github.com/dominant-strategies/go-quai/trie" "google.golang.org/protobuf/proto" @@ -715,85 +714,35 @@ func (progpow *Progpow) Finalize(chain consensus.ChainHeaderReader, batch ethdb. } utxoSetSize -= uint64(len(utxosDelete)) - trimDepths := types.TrimDepths - if utxoSetSize > params.SoftMaxUTXOSetSize/2 { - var err error - trimDepths, err = rawdb.ReadTrimDepths(chain.Database(), header.ParentHash(nodeCtx)) - if err != nil || trimDepths == nil { - progpow.logger.Errorf("Failed to read trim depths for block %s: %+v", header.ParentHash(nodeCtx).String(), err) - trimDepths = make(map[uint8]uint64, len(types.TrimDepths)) - for denomination, depth := range types.TrimDepths { // copy the default trim depths - trimDepths[denomination] = depth - } - } - if UpdateTrimDepths(trimDepths, utxoSetSize) { - progpow.logger.Infof("Updated trim depths at height %d new depths: %+v", header.NumberU64(nodeCtx), trimDepths) - } - if !setRoots { - rawdb.WriteTrimDepths(batch, header.Hash(), trimDepths) - } - } - start := time.Now() - collidingKeys, err := rawdb.ReadCollidingKeys(chain.Database(), header.ParentHash(nodeCtx)) - if err != nil { - progpow.logger.Errorf("Failed to read colliding keys for block %s: %+v", header.ParentHash(nodeCtx).String(), err) - } - newCollidingKeys := make([][]byte, 0) trimmedUtxos := make([]*types.SpentUtxoEntry, 0) - var wg sync.WaitGroup - var lock sync.Mutex - for 
denomination, depth := range trimDepths { - if denomination <= types.MaxTrimDenomination && header.NumberU64(nodeCtx) > depth+params.MinimumTrimDepth { - wg.Add(1) - go func(denomination uint8, depth uint64) { - defer func() { - if r := recover(); r != nil { - progpow.logger.WithFields(log.Fields{ - "error": r, - "stacktrace": string(debug.Stack()), - }).Error("Go-Quai Panicked") - } - }() - nextBlockToTrim := rawdb.ReadCanonicalHash(chain.Database(), header.NumberU64(nodeCtx)-depth) - collisions := TrimBlock(chain, batch, denomination, true, header.NumberU64(nodeCtx)-depth, nextBlockToTrim, &utxosDelete, &trimmedUtxos, nil, &utxoSetSize, !setRoots, &lock, progpow.logger) // setRoots is false when we are processing the block - if len(collisions) > 0 { - lock.Lock() - newCollidingKeys = append(newCollidingKeys, collisions...) - lock.Unlock() - } - wg.Done() - }(denomination, depth) + if header.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 { + start := time.Now() + var wg sync.WaitGroup + var lock sync.Mutex + for denomination, depth := range types.TrimDepths { + if denomination <= types.MaxTrimDenomination && header.NumberU64(nodeCtx) > depth { + wg.Add(1) + go func(denomination uint8, depth uint64) { + defer func() { + if r := recover(); r != nil { + progpow.logger.WithFields(log.Fields{ + "error": r, + "stacktrace": string(debug.Stack()), + }).Error("Go-Quai Panicked") + } + }() + nextBlockToTrim := rawdb.ReadCanonicalHash(chain.Database(), header.NumberU64(nodeCtx)-depth) + TrimBlock(chain, batch, denomination, header.NumberU64(nodeCtx)-depth, nextBlockToTrim, &utxosDelete, &trimmedUtxos, &utxoSetSize, !setRoots, &lock, progpow.logger) // setRoots is false when we are processing the block + wg.Done() + }(denomination, depth) + } + } + wg.Wait() + if len(trimmedUtxos) > 0 { + progpow.logger.Infof("Trimmed %d UTXOs from db in %s", len(trimmedUtxos), common.PrettyDuration(time.Since(start))) } - } - if len(collidingKeys) > 0 { - wg.Add(1) - go func() { 
- defer func() { - if r := recover(); r != nil { - progpow.logger.WithFields(log.Fields{ - "error": r, - "stacktrace": string(debug.Stack()), - }).Error("Go-Quai Panicked") - } - }() - // Trim colliding/duplicate keys here - an optimization could be to do this above in parallel with the other trims - collisions := TrimBlock(chain, batch, 0, false, 0, common.Hash{}, &utxosDelete, &trimmedUtxos, collidingKeys, &utxoSetSize, !setRoots, &lock, progpow.logger) - if len(collisions) > 0 { - lock.Lock() - newCollidingKeys = append(newCollidingKeys, collisions...) - lock.Unlock() - } - wg.Done() - }() - } - wg.Wait() - if len(trimmedUtxos) > 0 { - progpow.logger.Infof("Trimmed %d UTXOs from db in %s", len(trimmedUtxos), common.PrettyDuration(time.Since(start))) - } - if !setRoots { - rawdb.WriteTrimmedUTXOs(batch, header.Hash(), trimmedUtxos) - if len(newCollidingKeys) > 0 { - rawdb.WriteCollidingKeys(batch, header.Hash(), newCollidingKeys) + if !setRoots { + rawdb.WriteTrimmedUTXOs(batch, header.Hash(), trimmedUtxos) } } for _, hash := range utxosCreate { @@ -815,164 +764,68 @@ func (progpow *Progpow) Finalize(chain consensus.ChainHeaderReader, batch ethdb. // TrimBlock trims all UTXOs of a given denomination that were created in a given block. // In the event of an attacker intentionally creating too many 9-byte keys that collide, we return the colliding keys to be trimmed in the next block. 
-func TrimBlock(chain consensus.ChainHeaderReader, batch ethdb.Batch, denomination uint8, checkDenom bool, blockHeight uint64, blockHash common.Hash, utxosDelete *[]common.Hash, trimmedUtxos *[]*types.SpentUtxoEntry, collidingKeys [][]byte, utxoSetSize *uint64, deleteFromDb bool, lock *sync.Mutex, logger *log.Logger) [][]byte { - utxosCreated, _ := rawdb.ReadPrunedUTXOKeys(chain.Database(), blockHeight) +func TrimBlock(chain consensus.ChainHeaderReader, batch ethdb.Batch, denomination uint8, blockHeight uint64, blockHash common.Hash, utxosDelete *[]common.Hash, trimmedUtxos *[]*types.SpentUtxoEntry, utxoSetSize *uint64, deleteFromDb bool, lock *sync.Mutex, logger *log.Logger) { + utxosCreated, _ := rawdb.ReadCreatedUTXOKeys(chain.Database(), blockHash) if len(utxosCreated) == 0 { - // This should almost never happen, but we need to handle it - utxosCreated, _ = rawdb.ReadCreatedUTXOKeys(chain.Database(), blockHash) - logger.Infof("Reading non-pruned UTXOs for block %d", blockHeight) - for i, key := range utxosCreated { - if len(key) == rawdb.UtxoKeyWithDenominationLength { - if key[len(key)-1] > types.MaxTrimDenomination { - // Don't keep it if the denomination is not trimmed - // The keys are sorted in order of denomination, so we can break here - break - } - key[rawdb.PrunedUtxoKeyWithDenominationLength+len(rawdb.UtxoPrefix)-1] = key[len(key)-1] // place the denomination at the end of the pruned key (11th byte will become 9th byte) - } - // Reduce key size to 9 bytes and cut off the prefix - key = key[len(rawdb.UtxoPrefix) : rawdb.PrunedUtxoKeyWithDenominationLength+len(rawdb.UtxoPrefix)] - utxosCreated[i] = key - } + logger.Infof("UTXOs created in block %d: %d", blockHeight, len(utxosCreated)) + return } logger.Infof("UTXOs created in block %d: %d", blockHeight, len(utxosCreated)) - if len(collidingKeys) > 0 { - logger.Infof("Colliding keys: %d", len(collidingKeys)) - utxosCreated = append(utxosCreated, collidingKeys...) 
- sort.Slice(utxosCreated, func(i, j int) bool { - return utxosCreated[i][len(utxosCreated[i])-1] < utxosCreated[j][len(utxosCreated[j])-1] - }) - } - newCollisions := make([][]byte, 0) - duplicateKeys := make(map[[36]byte]bool) // cannot use rawdb.UtxoKeyLength for map as it's not const // Start by grabbing all the UTXOs created in the block (that are still in the UTXO set) for _, key := range utxosCreated { - if len(key) != rawdb.PrunedUtxoKeyWithDenominationLength { - continue - } - if checkDenom { - if key[len(key)-1] != denomination { - if key[len(key)-1] > denomination { - break // The keys are stored in order of denomination, so we can stop checking here - } else { - continue - } - } else { - key = append(rawdb.UtxoPrefix, key...) // prepend the db prefix - key = key[:len(key)-1] // remove the denomination byte - } - } - // Check key in database - i := 0 - it := chain.Database().NewIterator(key, nil) - for it.Next() { - data := it.Value() - if len(data) == 0 { - logger.Infof("Empty key found, denomination: %d", denomination) - continue - } - // Check if the key is a duplicate - if len(it.Key()) == rawdb.UtxoKeyLength { - key36 := [36]byte(it.Key()) - if duplicateKeys[key36] { - continue - } else { - duplicateKeys[key36] = true - } - } - utxoProto := new(types.ProtoTxOut) - if err := proto.Unmarshal(data, utxoProto); err != nil { - logger.Errorf("Failed to unmarshal ProtoTxOut: %+v data: %+v key: %+v", err, data, key) - continue - } - utxo := new(types.UtxoEntry) - if err := utxo.ProtoDecode(utxoProto); err != nil { - logger.WithFields(log.Fields{ - "key": key, - "data": data, - "err": err, - }).Error("Invalid utxo Proto") - continue - } - if checkDenom && utxo.Denomination != denomination { - continue - } - txHash, index, err := rawdb.ReverseUtxoKey(it.Key()) - if err != nil { - logger.WithField("err", err).Error("Failed to parse utxo key") + if key[len(key)-1] != denomination { + if key[len(key)-1] > denomination { + break // The keys are stored in order of 
denomination, so we can stop checking here + } else { continue } - lock.Lock() - *utxosDelete = append(*utxosDelete, types.UTXOHash(txHash, index, utxo)) - if deleteFromDb { - batch.Delete(it.Key()) - *trimmedUtxos = append(*trimmedUtxos, &types.SpentUtxoEntry{OutPoint: types.OutPoint{txHash, index}, UtxoEntry: utxo}) - } - *utxoSetSize-- - lock.Unlock() - i++ - if i >= types.MaxTrimCollisionsPerKeyPerBlock { - // This will rarely ever happen, but if it does, we should continue trimming this key in the next block - logger.WithField("blockHeight", blockHeight).Error("MaxTrimCollisionsPerBlock exceeded") - newCollisions = append(newCollisions, key) - break - } + } else { + key = key[:len(key)-1] // remove the denomination byte } - it.Release() - } - return newCollisions -} -func UpdateTrimDepths(trimDepths map[uint8]uint64, utxoSetSize uint64) bool { - switch { - case utxoSetSize > params.SoftMaxUTXOSetSize/2 && trimDepths[255] == 0: // 50% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth - (depth / 10) // decrease lifespan of this denomination by 10% - } - trimDepths[255] = 1 // level 1 - case utxoSetSize > params.SoftMaxUTXOSetSize-(params.SoftMaxUTXOSetSize/4) && trimDepths[255] == 1: // 75% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth - (depth / 5) // decrease lifespan of this denomination by an additional 20% - } - trimDepths[255] = 2 // level 2 - case utxoSetSize > params.SoftMaxUTXOSetSize-(params.SoftMaxUTXOSetSize/10) && trimDepths[255] == 2: // 90% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth - (depth / 2) // decrease lifespan of this denomination by an additional 50% - } - trimDepths[255] = 3 // level 3 - case utxoSetSize > params.SoftMaxUTXOSetSize && trimDepths[255] == 3: - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth - (depth / 2) // decrease lifespan of this denomination by an additional 50% - 
} - trimDepths[255] = 4 // level 4 - - // Resets - case utxoSetSize <= params.SoftMaxUTXOSetSize/2 && trimDepths[255] == 1: // Below 50% full - for denomination, depth := range types.TrimDepths { // reset to the default trim depths - trimDepths[denomination] = depth - } - trimDepths[255] = 0 // level 0 - case utxoSetSize <= params.SoftMaxUTXOSetSize-(params.SoftMaxUTXOSetSize/4) && trimDepths[255] == 2: // Below 75% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth + (depth / 5) // increase lifespan of this denomination by 20% - } - trimDepths[255] = 1 // level 1 - case utxoSetSize <= params.SoftMaxUTXOSetSize-(params.SoftMaxUTXOSetSize/10) && trimDepths[255] == 3: // Below 90% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth + (depth / 2) // increase lifespan of this denomination by 50% - } - trimDepths[255] = 2 // level 2 - case utxoSetSize <= params.SoftMaxUTXOSetSize && trimDepths[255] == 4: // Below 100% full - for denomination, depth := range trimDepths { - trimDepths[denomination] = depth + (depth / 2) // increase lifespan of this denomination by 50% - } - trimDepths[255] = 3 // level 3 - default: - return false + data, _ := chain.Database().Get(key) + if len(data) == 0 { + logger.Infof("Empty key found, denomination: %d", denomination) + continue + } + utxoProto := new(types.ProtoTxOut) + if err := proto.Unmarshal(data, utxoProto); err != nil { + logger.Errorf("Failed to unmarshal ProtoTxOut: %+v data: %+v key: %+v", err, data, key) + continue + } + utxo := new(types.UtxoEntry) + if err := utxo.ProtoDecode(utxoProto); err != nil { + logger.WithFields(log.Fields{ + "key": key, + "data": data, + "err": err, + }).Error("Invalid utxo Proto") + continue + } + if utxo.Denomination != denomination { + continue + } + // Only the coinbase and conversion txs are allowed to have lockups that + // is non zero + if utxo.Lock.Sign() == 0 { + continue + } + txHash, index, err := 
rawdb.ReverseUtxoKey(key) + if err != nil { + logger.WithField("err", err).Error("Failed to parse utxo key") + continue + } + lock.Lock() + *utxosDelete = append(*utxosDelete, types.UTXOHash(txHash, index, utxo)) + if deleteFromDb { + batch.Delete(key) + *trimmedUtxos = append(*trimmedUtxos, &types.SpentUtxoEntry{OutPoint: types.OutPoint{txHash, index}, UtxoEntry: utxo}) + } + *utxoSetSize-- + lock.Unlock() } - return true } // FinalizeAndAssemble implements consensus.Engine, accumulating the block and diff --git a/core/chain_indexer.go b/core/chain_indexer.go index 70d36761c7..d9e2b2fe17 100644 --- a/core/chain_indexer.go +++ b/core/chain_indexer.go @@ -424,8 +424,6 @@ func (c *ChainIndexer) PruneOldBlockData(blockHeight uint64) { rawdb.DeleteCreatedUTXOKeys(c.chainDb, blockHash) rawdb.DeleteSpentUTXOs(c.chainDb, blockHash) rawdb.DeleteTrimmedUTXOs(c.chainDb, blockHash) - rawdb.DeleteTrimDepths(c.chainDb, blockHash) - rawdb.DeleteCollidingKeys(c.chainDb, blockHash) } func compareMinLength(a, b []byte) bool { diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 638a993a1a..9051dabe16 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -1590,69 +1590,6 @@ func DeleteTrimmedUTXOs(db ethdb.KeyValueWriter, blockHash common.Hash) { } } -func ReadTrimDepths(db ethdb.Reader, blockHash common.Hash) (map[uint8]uint64, error) { - data, _ := db.Get(trimDepthsKey(blockHash)) - if len(data) == 0 { - return nil, nil - } - protoTrimDepths := new(types.ProtoTrimDepths) - if err := proto.Unmarshal(data, protoTrimDepths); err != nil { - return nil, err - } - trimDepths := make(map[uint8]uint64, len(protoTrimDepths.TrimDepths)) - for denomination, depth := range protoTrimDepths.TrimDepths { - trimDepths[uint8(denomination)] = depth - } - return trimDepths, nil -} - -func WriteTrimDepths(db ethdb.KeyValueWriter, blockHash common.Hash, trimDepths map[uint8]uint64) error { - protoTrimDepths := &types.ProtoTrimDepths{TrimDepths: 
make(map[uint32]uint64, len(trimDepths))} - for denomination, depth := range trimDepths { - protoTrimDepths.TrimDepths[uint32(denomination)] = depth - } - data, err := proto.Marshal(protoTrimDepths) - if err != nil { - db.Logger().WithField("err", err).Fatal("Failed to rlp encode utxo") - } - return db.Put(trimDepthsKey(blockHash), data) -} - -func DeleteTrimDepths(db ethdb.KeyValueWriter, blockHash common.Hash) { - if err := db.Delete(trimDepthsKey(blockHash)); err != nil { - db.Logger().WithField("err", err).Fatal("Failed to delete trim depths") - } -} - -func ReadCollidingKeys(db ethdb.Reader, blockHash common.Hash) ([][]byte, error) { - data, _ := db.Get(collidingKeysKey(blockHash)) - if len(data) == 0 { - return nil, nil - } - protoKeys := new(types.ProtoKeys) - if err := proto.Unmarshal(data, protoKeys); err != nil { - return nil, err - } - return protoKeys.Keys, nil -} - -func WriteCollidingKeys(db ethdb.KeyValueWriter, blockHash common.Hash, keys [][]byte) error { - protoKeys := &types.ProtoKeys{Keys: make([][]byte, 0, len(keys))} - protoKeys.Keys = append(protoKeys.Keys, keys...) 
- - data, err := proto.Marshal(protoKeys) - if err != nil { - db.Logger().WithField("err", err).Fatal("Failed to rlp encode utxo") - } - return db.Put(collidingKeysKey(blockHash), data) -} - -func DeleteCollidingKeys(db ethdb.KeyValueWriter, blockHash common.Hash) { - if err := db.Delete(collidingKeysKey(blockHash)); err != nil { - db.Logger().WithField("err", err).Fatal("Failed to delete colliding keys") - } -} - func ReadAlreadyPruned(db ethdb.Reader, blockHash common.Hash) bool { data, _ := db.Get(alreadyPrunedKey(blockHash)) return len(data) > 0 diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index fd170e468e..44038b653d 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -91,8 +91,6 @@ var ( utxoPrefix = []byte("ut") // outpointPrefix + hash -> types.Outpoint spentUTXOsPrefix = []byte("sutxo") // spentUTXOsPrefix + hash -> []types.SpentTxOut trimmedUTXOsPrefix = []byte("tutxo") // trimmedUTXOsPrefix + hash -> []types.SpentTxOut - trimDepthsPrefix = []byte("td") // trimDepthsPrefix + hash -> uint64 - collidingKeysPrefix = []byte("ck") // collidingKeysPrefix + hash -> [][]byte createdUTXOsPrefix = []byte("cutxo") // createdUTXOsPrefix + hash -> []common.Hash prunedUTXOKeysPrefix = []byte("putxo") // prunedUTXOKeysPrefix + num (uint64 big endian) -> hash prunedPrefix = []byte("pru") // prunedPrefix + hash -> pruned @@ -363,14 +361,6 @@ func lastTrimmedBlockKey(hash common.Hash) []byte { return append(lastTrimmedBlockPrefix, hash.Bytes()...) } -func trimDepthsKey(hash common.Hash) []byte { - return append(trimDepthsPrefix, hash.Bytes()...) -} - -func collidingKeysKey(hash common.Hash) []byte { - return append(collidingKeysPrefix, hash.Bytes()...) -} - func alreadyPrunedKey(hash common.Hash) []byte { return append(prunedPrefix, hash.Bytes()...) 
} diff --git a/core/types/utxo.go b/core/types/utxo.go index 8c0d376717..2ead36f234 100644 --- a/core/types/utxo.go +++ b/core/types/utxo.go @@ -16,9 +16,8 @@ import ( const ( MaxDenomination = 14 - MaxOutputIndex = math.MaxUint16 - MaxTrimDenomination = 5 - MaxTrimCollisionsPerKeyPerBlock = 1000 + MaxOutputIndex = math.MaxUint16 + MaxTrimDenomination = 5 ) var MaxQi = new(big.Int).Mul(big.NewInt(math.MaxInt64), big.NewInt(params.Ether)) // This is just a default; determine correct value later @@ -48,13 +47,12 @@ func init() { Denominations[14] = big.NewInt(1000000000) // 1000000 Qi TrimDepths = make(map[uint8]uint64) - TrimDepths[0] = 720 // 2 hours - TrimDepths[1] = 720 // 2 hours - TrimDepths[2] = 1080 // 3 hours - TrimDepths[3] = 1080 // 3 hours - TrimDepths[4] = 2160 // 6 hours - TrimDepths[5] = 4320 // 12 hours - TrimDepths[6] = 8640 // 24 hours + TrimDepths[0] = params.GoldenAgeForkNumberV2 + 720 // 2 hours after fork starts from block 1 + TrimDepths[1] = params.GoldenAgeForkNumberV2 + 720 // 2 hours + TrimDepths[2] = params.GoldenAgeForkNumberV2 + 1080 // 3 hours + TrimDepths[3] = params.GoldenAgeForkNumberV2 + 1080 // 3 hours + TrimDepths[4] = params.GoldenAgeForkNumberV2 + 2160 // 6 hours + TrimDepths[5] = params.GoldenAgeForkNumberV2 + 4320 // 12 hours } type TxIns []TxIn diff --git a/params/protocol_params.go b/params/protocol_params.go index c44baee828..a3bca06670 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -152,8 +152,6 @@ const ( NewConversionLockPeriod uint64 = 7200 MinQiConversionDenomination = 10 ConversionConfirmationContext = common.PRIME_CTX // A conversion requires a single coincident Dom confirmation - SoftMaxUTXOSetSize = math.MaxInt // The soft maximum number of UTXOs that can be stored in the UTXO set - MinimumTrimDepth = math.MaxInt // The minimum block depth of the chain to begin trimming QiToQuaiConversionGas = 100000 // The gas used to convert Qi to Quai DefaultCoinbaseLockup = 0 // The default lockup byte 
for coinbase rewards ) From 52ece8279dab5bd5f6f2db104abd9be82fe7372b Mon Sep 17 00:00:00 2001 From: Jonathan Downing Date: Mon, 4 Nov 2024 16:12:36 -0600 Subject: [PATCH 4/9] Disallow UTXO locks and don't immediately remove aggregation Qi tx from pool --- core/state_processor.go | 7 ++++++- core/worker.go | 5 ++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index 13149ca691..3d85b907c9 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -1076,6 +1076,9 @@ func ValidateQiTxInputs(tx *types.Transaction, chain ChainContext, db ethdb.Read types.MaxDenomination) return nil, errors.New(str) } + if currentHeader.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 && txOut.Lock != nil && txOut.Lock.Sign() != 0 { + return nil, errors.New("QiTx output has non-zero lock") + } outputs[uint(txOut.Denomination)]++ if common.IsConversionOutput(txOut.Address, location) { // Qi->Quai conversion outputs[uint(txOut.Denomination)] -= 1 // This output no longer exists because it has been aggregated @@ -1117,7 +1120,9 @@ func ValidateQiTxOutputsAndSignature(tx *types.Transaction, chain ChainContext, if txOutIdx > types.MaxOutputIndex { return nil, fmt.Errorf("tx [%v] exceeds max output index of %d", tx.Hash().Hex(), types.MaxOutputIndex) } - + if currentHeader.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 && txOut.Lock != nil && txOut.Lock.Sign() != 0 { + return nil, errors.New("QiTx output has non-zero lock") + } if txOut.Denomination > types.MaxDenomination { str := fmt.Sprintf("transaction output value of %v is "+ "higher than max allowed value of %v", diff --git a/core/worker.go b/core/worker.go index d00100af5d..ea8ce31f88 100644 --- a/core/worker.go +++ b/core/worker.go @@ -1327,7 +1327,7 @@ func (w *worker) commitTransactions(env *environment, primeTerminus *types.WorkO break } if err := w.processQiTx(tx, env, primeTerminus, parent, firstQiTx); err != nil { - if 
strings.Contains(err.Error(), "emits too many") || strings.Contains(err.Error(), "double spends") { + if strings.Contains(err.Error(), "emits too many") || strings.Contains(err.Error(), "double spends") || strings.Contains(err.Error(), "combine smaller denominations") { // This is not an invalid tx, our block is just full of ETXs // Alternatively, a tx double spends a cached deleted UTXO, likely replaced-by-fee txs.PopNoSort() @@ -2088,6 +2088,9 @@ func (w *worker) processQiTx(tx *types.Transaction, env *environment, primeTermi types.MaxDenomination) return errors.New(str) } + if env.wo.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 && txOut.Lock != nil && txOut.Lock.Sign() != 0 { + return errors.New("QiTx output has non-zero lock") + } outputs[uint(txOut.Denomination)] += 1 totalQitOut.Add(totalQitOut, types.Denominations[txOut.Denomination]) toAddr := common.BytesToAddress(txOut.Address, location) From e06e2cff5e6fd4bdc8c217d44f7be2d902c1147e Mon Sep 17 00:00:00 2001 From: Jonathan Downing Date: Mon, 4 Nov 2024 16:24:38 -0600 Subject: [PATCH 5/9] Fix Qi-Quai conversions by checking the total Qit amount out instead of each output --- core/state_processor.go | 10 ++++++++-- core/worker.go | 5 ++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index 3d85b907c9..281416a2cc 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -1142,7 +1142,7 @@ func ValidateQiTxOutputsAndSignature(tx *types.Transaction, chain ChainContext, if toAddr.Location().Equal(location) && toAddr.IsInQuaiLedgerScope() { // Qi->Quai conversion conversion = true - if txOut.Denomination < params.MinQiConversionDenomination { + if currentHeader.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 && txOut.Denomination < params.MinQiConversionDenomination { return nil, fmt.Errorf("tx %v emits UTXO with value %d less than minimum denomination %d", tx.Hash().Hex(), txOut.Denomination, 
params.MinQiConversionDenomination) } totalConvertQitOut.Add(totalConvertQitOut, types.Denominations[txOut.Denomination]) // Add to total conversion output for aggregation @@ -1207,6 +1207,9 @@ func ValidateQiTxOutputsAndSignature(tx *types.Transaction, chain ChainContext, return nil, fmt.Errorf("tx %032x has insufficient fee for base fee, have %d want %d", tx.Hash(), txFeeInQuai.Uint64(), minimumFeeInQuai.Uint64()) } if conversion { + if currentHeader.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 && totalConvertQitOut.Cmp(types.Denominations[params.MinQiConversionDenomination]) < 0 { + return nil, fmt.Errorf("tx %032x emits convert UTXO with value %d less than minimum conversion denomination", tx.Hash(), totalConvertQitOut.Uint64()) + } ETXPCount++ if ETXPCount > etxPLimit { return nil, fmt.Errorf("tx [%v] emits too many cross-prime ETXs for block. emitted: %d, limit: %d", tx.Hash().Hex(), ETXPCount, etxPLimit) @@ -1360,7 +1363,7 @@ func ProcessQiTx(tx *types.Transaction, chain ChainContext, checkSig bool, isFir if toAddr.Location().Equal(location) && toAddr.IsInQuaiLedgerScope() { // Qi->Quai conversion conversion = true convertAddress = toAddr - if txOut.Denomination < params.MinQiConversionDenomination { + if currentHeader.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 && txOut.Denomination < params.MinQiConversionDenomination { return nil, nil, nil, fmt.Errorf("tx %v emits UTXO with value %d less than minimum denomination %d", tx.Hash().Hex(), txOut.Denomination, params.MinQiConversionDenomination), nil } totalConvertQitOut.Add(totalConvertQitOut, types.Denominations[txOut.Denomination]) // Add to total conversion output for aggregation @@ -1442,6 +1445,9 @@ func ProcessQiTx(tx *types.Transaction, chain ChainContext, checkSig bool, isFir return nil, nil, nil, fmt.Errorf("tx %032x has insufficient fee for base fee, have %d want %d", tx.Hash(), txFeeInQuai.Uint64(), minimumFeeInQuai.Uint64()), nil } if conversion { + if 
currentHeader.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 && totalConvertQitOut.Cmp(types.Denominations[params.MinQiConversionDenomination]) < 0 { + return nil, nil, nil, fmt.Errorf("tx %032x emits convert UTXO with value %d less than minimum conversion denomination", tx.Hash(), totalConvertQitOut.Uint64()), nil + } // Since this transaction contains a conversion, the rest of the tx gas is given to conversion remainingTxFeeInQuai := misc.QiToQuai(parent, txFeeInQit) // Fee is basefee * gas, so gas remaining is fee remaining / basefee diff --git a/core/worker.go b/core/worker.go index ea8ce31f88..4850576325 100644 --- a/core/worker.go +++ b/core/worker.go @@ -2105,7 +2105,7 @@ func (w *worker) processQiTx(tx *types.Transaction, env *environment, primeTermi } conversion = true convertAddress = toAddr - if txOut.Denomination < params.MinQiConversionDenomination { + if txOut.Denomination < params.MinQiConversionDenomination && env.wo.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { return fmt.Errorf("tx %032x emits convert UTXO with value %d less than minimum conversion denomination", tx.Hash(), txOut.Denomination) } totalConvertQitOut.Add(totalConvertQitOut, types.Denominations[txOut.Denomination]) // Add to total conversion output for aggregation @@ -2184,6 +2184,9 @@ func (w *worker) processQiTx(tx *types.Transaction, env *environment, primeTermi return fmt.Errorf("tx %032x has insufficient fee for base fee * gas, have %d want %d", tx.Hash(), txFeeInQit.Uint64(), minimumFeeInQuai.Uint64()) } if conversion { + if env.wo.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 && totalConvertQitOut.Cmp(types.Denominations[params.MinQiConversionDenomination]) < 0 { + return fmt.Errorf("tx %032x emits convert UTXO with value %d less than minimum conversion denomination", tx.Hash(), totalConvertQitOut.Uint64()) + } // Since this transaction contains a conversion, the rest of the tx gas is given to conversion remainingTxFeeInQuai := 
misc.QiToQuai(parent, txFeeInQit) // Fee is basefee * gas, so gas remaining is fee remaining / basefee From 159f1d67596b0a23acf4adc7bae2ff31e9ec6a3e Mon Sep 17 00:00:00 2001 From: gop Date: Wed, 6 Nov 2024 15:48:45 -0600 Subject: [PATCH 6/9] Added a 30% juice on coinbase reward for a few hours after the fork --- consensus/misc/rewards.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/consensus/misc/rewards.go b/consensus/misc/rewards.go index 04794573cd..bceaf6c10b 100644 --- a/consensus/misc/rewards.go +++ b/consensus/misc/rewards.go @@ -11,11 +11,19 @@ import ( ) func CalculateReward(parent *types.WorkObject, header *types.WorkObjectHeader) *big.Int { + var reward *big.Int if header.PrimaryCoinbase().IsInQiLedgerScope() { - return CalculateQiReward(parent.WorkObjectHeader()) + reward = new(big.Int).Set(CalculateQiReward(parent.WorkObjectHeader())) } else { - return CalculateQuaiReward(parent) + reward = new(big.Int).Set(CalculateQuaiReward(parent)) } + + // ~30% extra reward for grace number of blocks after the fork to encourage nodes to move to the fork + if header.NumberU64() >= params.GoldenAgeForkNumberV2 && header.NumberU64() < params.GoldenAgeForkNumberV2+params.GoldenAgeForkGraceNumber { + reward = new(big.Int).Add(reward, new(big.Int).Div(reward, big.NewInt(70))) + } + + return reward } // Calculate the amount of Quai that Qi can be converted to. 
Expect the current Header and the Qi amount in "qits", returns the quai amount in "its" From 0c4571de4812fc4f117ad7ec13a0555efe47a6a9 Mon Sep 17 00:00:00 2001 From: gop Date: Fri, 8 Nov 2024 10:38:41 -0600 Subject: [PATCH 7/9] [HAL-18] Performing multiplication before the division to keep more precision in CalcOrder --- consensus/blake3pow/poem.go | 23 +++++++++++++++++++---- consensus/progpow/poem.go | 23 +++++++++++++++++++---- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/consensus/blake3pow/poem.go b/consensus/blake3pow/poem.go index c7368fa8c4..6e06b1ad5b 100644 --- a/consensus/blake3pow/poem.go +++ b/consensus/blake3pow/poem.go @@ -44,8 +44,15 @@ func (blake3pow *Blake3pow) CalcOrder(chain consensus.BlockReader, header *types // the given header determines the prime block totalDeltaEntropyPrime := new(big.Int).Add(header.ParentDeltaEntropy(common.REGION_CTX), header.ParentDeltaEntropy(common.ZONE_CTX)) totalDeltaEntropyPrime = new(big.Int).Add(totalDeltaEntropyPrime, intrinsicEntropy) - primeDeltaEntropyTarget := new(big.Int).Div(params.PrimeEntropyTarget(expansionNum), big2) - primeDeltaEntropyTarget = new(big.Int).Mul(zoneThresholdEntropy, primeDeltaEntropyTarget) + + var primeDeltaEntropyTarget *big.Int + if header.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + primeDeltaEntropyTarget = new(big.Int).Div(params.PrimeEntropyTarget(expansionNum), big2) + primeDeltaEntropyTarget = new(big.Int).Mul(zoneThresholdEntropy, primeDeltaEntropyTarget) + } else { + primeDeltaEntropyTarget = new(big.Int).Mul(params.PrimeEntropyTarget(expansionNum), zoneThresholdEntropy) + primeDeltaEntropyTarget = new(big.Int).Div(primeDeltaEntropyTarget, common.Big2) + } primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdEntropy, common.BitsToBigBits(params.PrimeEntropyTarget(expansionNum))) if intrinsicEntropy.Cmp(primeBlockEntropyThreshold) > 0 && totalDeltaEntropyPrime.Cmp(primeDeltaEntropyTarget) > 0 { @@ -56,8 +63,16 @@ func (blake3pow 
*Blake3pow) CalcOrder(chain consensus.BlockReader, header *types // REGION // Compute the total accumulated entropy since the last region block totalDeltaEntropyRegion := new(big.Int).Add(header.ParentDeltaEntropy(common.ZONE_CTX), intrinsicEntropy) - regionDeltaEntropyTarget := new(big.Int).Div(params.RegionEntropyTarget(expansionNum), big2) - regionDeltaEntropyTarget = new(big.Int).Mul(zoneThresholdEntropy, regionDeltaEntropyTarget) + + var regionDeltaEntropyTarget *big.Int + if header.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + regionDeltaEntropyTarget = new(big.Int).Div(params.RegionEntropyTarget(expansionNum), big2) + regionDeltaEntropyTarget = new(big.Int).Mul(zoneThresholdEntropy, regionDeltaEntropyTarget) + } else { + regionDeltaEntropyTarget = new(big.Int).Mul(zoneThresholdEntropy, params.RegionEntropyTarget(expansionNum)) + regionDeltaEntropyTarget = new(big.Int).Div(regionDeltaEntropyTarget, big2) + } + regionBlockEntropyThreshold := new(big.Int).Add(zoneThresholdEntropy, common.BitsToBigBits(params.RegionEntropyTarget(expansionNum))) if intrinsicEntropy.Cmp(regionBlockEntropyThreshold) > 0 && totalDeltaEntropyRegion.Cmp(regionDeltaEntropyTarget) > 0 { chain.AddToCalcOrderCache(header.Hash(), common.REGION_CTX, intrinsicEntropy) diff --git a/consensus/progpow/poem.go b/consensus/progpow/poem.go index 4e19bc0f2b..0677860d48 100644 --- a/consensus/progpow/poem.go +++ b/consensus/progpow/poem.go @@ -43,8 +43,15 @@ func (progpow *Progpow) CalcOrder(chain consensus.BlockReader, header *types.Wor // the given header determines the prime block totalDeltaEntropyPrime := new(big.Int).Add(header.ParentDeltaEntropy(common.REGION_CTX), header.ParentDeltaEntropy(common.ZONE_CTX)) totalDeltaEntropyPrime = new(big.Int).Add(totalDeltaEntropyPrime, intrinsicEntropy) - primeDeltaEntropyTarget := new(big.Int).Div(params.PrimeEntropyTarget(expansionNum), big2) - primeDeltaEntropyTarget = new(big.Int).Mul(zoneThresholdEntropy, primeDeltaEntropyTarget) + + 
var primeDeltaEntropyTarget *big.Int + if header.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + primeDeltaEntropyTarget = new(big.Int).Div(params.PrimeEntropyTarget(expansionNum), big2) + primeDeltaEntropyTarget = new(big.Int).Mul(zoneThresholdEntropy, primeDeltaEntropyTarget) + } else { + primeDeltaEntropyTarget = new(big.Int).Mul(params.PrimeEntropyTarget(expansionNum), zoneThresholdEntropy) + primeDeltaEntropyTarget = new(big.Int).Div(primeDeltaEntropyTarget, big2) + } primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdEntropy, common.BitsToBigBits(params.PrimeEntropyTarget(expansionNum))) if intrinsicEntropy.Cmp(primeBlockEntropyThreshold) > 0 && totalDeltaEntropyPrime.Cmp(primeDeltaEntropyTarget) > 0 { @@ -55,8 +62,16 @@ func (progpow *Progpow) CalcOrder(chain consensus.BlockReader, header *types.Wor // REGION // Compute the total accumulated entropy since the last region block totalDeltaSRegion := new(big.Int).Add(header.ParentDeltaEntropy(common.ZONE_CTX), intrinsicEntropy) - regionDeltaSTarget := new(big.Int).Div(params.RegionEntropyTarget(expansionNum), big2) - regionDeltaSTarget = new(big.Int).Mul(zoneThresholdEntropy, regionDeltaSTarget) + + var regionDeltaSTarget *big.Int + if header.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + regionDeltaSTarget = new(big.Int).Div(params.RegionEntropyTarget(expansionNum), big2) + regionDeltaSTarget = new(big.Int).Mul(zoneThresholdEntropy, regionDeltaSTarget) + } else { + regionDeltaSTarget = new(big.Int).Mul(zoneThresholdEntropy, params.RegionEntropyTarget(expansionNum)) + regionDeltaSTarget = new(big.Int).Div(regionDeltaSTarget, big2) + } + regionBlockEntropyThreshold := new(big.Int).Add(zoneThresholdEntropy, common.BitsToBigBits(params.RegionEntropyTarget(expansionNum))) if intrinsicEntropy.Cmp(regionBlockEntropyThreshold) > 0 && totalDeltaSRegion.Cmp(regionDeltaSTarget) > 0 { chain.AddToCalcOrderCache(header.Hash(), common.REGION_CTX, intrinsicEntropy) From 
dbc77de2d98d429b3ce218726bc8efbd8243d57e Mon Sep 17 00:00:00 2001 From: gop Date: Fri, 8 Nov 2024 11:17:21 -0600 Subject: [PATCH 8/9] Updating the new workshares threshold diff to 4 after the second goldenage fork --- cmd/utils/flags.go | 2 +- consensus/blake3pow/poem.go | 8 +++++++- consensus/blake3pow/sealer.go | 7 ++++++- consensus/misc/rewards.go | 6 ++++++ consensus/progpow/poem.go | 8 +++++++- consensus/progpow/sealer.go | 6 +++++- core/headerchain.go | 7 ++++++- params/protocol_params.go | 3 ++- 8 files changed, 40 insertions(+), 7 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index d0a0b45804..57600e6777 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1381,7 +1381,7 @@ func SetQuaiConfig(stack *node.Node, cfg *quaiconfig.Config, slicesRunning []com cfg.GenesisNonce = viper.GetUint64(GenesisNonce.Name) cfg.Miner.WorkShareMining = viper.GetBool(WorkShareMiningFlag.Name) - cfg.Miner.WorkShareThreshold = params.WorkSharesThresholdDiff + viper.GetInt(WorkShareThresholdFlag.Name) + cfg.Miner.WorkShareThreshold = params.NewWorkSharesThresholdDiff + viper.GetInt(WorkShareThresholdFlag.Name) if viper.GetString(WorkShareMinerEndpoints.Name) != "" { cfg.Miner.Endpoints = []string{viper.GetString(WorkShareMinerEndpoints.Name)} } diff --git a/consensus/blake3pow/poem.go b/consensus/blake3pow/poem.go index 6e06b1ad5b..56e5ed9a9f 100644 --- a/consensus/blake3pow/poem.go +++ b/consensus/blake3pow/poem.go @@ -296,7 +296,13 @@ func (blake3pow *Blake3pow) CalcRank(chain consensus.ChainHeaderReader, header * } func (blake3pow *Blake3pow) CheckIfValidWorkShare(workShare *types.WorkObjectHeader) types.WorkShareValidity { - if blake3pow.CheckWorkThreshold(workShare, params.WorkSharesThresholdDiff) { + var thresholdDiff int + if workShare.NumberU64() < params.GoldenAgeForkNumberV2 { + thresholdDiff = params.OldWorkSharesThresholdDiff + } else { + thresholdDiff = params.NewWorkSharesThresholdDiff + } + if blake3pow.CheckWorkThreshold(workShare, 
thresholdDiff) { return types.Valid } else if blake3pow.CheckWorkThreshold(workShare, blake3pow.config.WorkShareThreshold) { return types.Sub diff --git a/consensus/blake3pow/sealer.go b/consensus/blake3pow/sealer.go index 313e90ffcd..dcb088ccda 100644 --- a/consensus/blake3pow/sealer.go +++ b/consensus/blake3pow/sealer.go @@ -10,6 +10,7 @@ import ( "runtime/debug" "sync" + "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/consensus" "github.com/dominant-strategies/go-quai/core/types" "github.com/dominant-strategies/go-quai/log" @@ -120,7 +121,11 @@ func (blake3pow *Blake3pow) Seal(header *types.WorkObject, results chan<- *types } func (blake3pow *Blake3pow) Mine(header *types.WorkObject, abort <-chan struct{}, found chan *types.WorkObject) { - blake3pow.MineToThreshold(header, params.WorkSharesThresholdDiff, abort, found) + if header.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + blake3pow.MineToThreshold(header, params.OldWorkSharesThresholdDiff, abort, found) + } else { + blake3pow.MineToThreshold(header, params.NewWorkSharesThresholdDiff, abort, found) + } } func (blake3pow *Blake3pow) MineToThreshold(workObject *types.WorkObject, workShareThreshold int, abort <-chan struct{}, found chan *types.WorkObject) { diff --git a/consensus/misc/rewards.go b/consensus/misc/rewards.go index bceaf6c10b..34c0a3df66 100644 --- a/consensus/misc/rewards.go +++ b/consensus/misc/rewards.go @@ -23,6 +23,12 @@ func CalculateReward(parent *types.WorkObject, header *types.WorkObjectHeader) * reward = new(big.Int).Add(reward, new(big.Int).Div(reward, big.NewInt(70))) } + // Since after the second fork, the number of the workshares allowed is increased by 2x, + // the reward value is cut by half to keep the rate of inflation the same + if header.NumberU64() >= params.GoldenAgeForkNumberV2 { + reward = new(big.Int).Div(reward, common.Big2) + } + return reward } diff --git a/consensus/progpow/poem.go b/consensus/progpow/poem.go index 
0677860d48..030a1f64ce 100644 --- a/consensus/progpow/poem.go +++ b/consensus/progpow/poem.go @@ -295,7 +295,13 @@ func (progpow *Progpow) CalcRank(chain consensus.ChainHeaderReader, header *type } func (progpow *Progpow) CheckIfValidWorkShare(workShare *types.WorkObjectHeader) types.WorkShareValidity { - if progpow.CheckWorkThreshold(workShare, params.WorkSharesThresholdDiff) { + var thresholdDiff int + if workShare.NumberU64() < params.GoldenAgeForkNumberV2 { + thresholdDiff = params.OldWorkSharesThresholdDiff + } else { + thresholdDiff = params.NewWorkSharesThresholdDiff + } + if progpow.CheckWorkThreshold(workShare, thresholdDiff) { return types.Valid } else if progpow.CheckWorkThreshold(workShare, progpow.config.WorkShareThreshold) { return types.Sub diff --git a/consensus/progpow/sealer.go b/consensus/progpow/sealer.go index f6421c4601..bbbcb87a89 100644 --- a/consensus/progpow/sealer.go +++ b/consensus/progpow/sealer.go @@ -121,7 +121,11 @@ func (progpow *Progpow) Seal(header *types.WorkObject, results chan<- *types.Wor } func (progpow *Progpow) Mine(workObject *types.WorkObject, abort <-chan struct{}, found chan *types.WorkObject) { - progpow.MineToThreshold(workObject, params.WorkSharesThresholdDiff, abort, found) + if workObject.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + progpow.MineToThreshold(workObject, params.OldWorkSharesThresholdDiff, abort, found) + } else { + progpow.MineToThreshold(workObject, params.NewWorkSharesThresholdDiff, abort, found) + } } func (progpow *Progpow) MineToThreshold(workObject *types.WorkObject, workShareThreshold int, abort <-chan struct{}, found chan *types.WorkObject) { diff --git a/core/headerchain.go b/core/headerchain.go index aff75c4aa3..a47312fc0c 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -1363,7 +1363,12 @@ func (hc *HeaderChain) GetMaxTxInWorkShare() uint64 { currentGasLimit := hc.CurrentHeader().GasLimit() maxEoaInBlock := currentGasLimit / params.TxGas // 
(maxEoaInBlock*2)/(2^bits) - return (maxEoaInBlock * 2) / uint64(math.Pow(2, float64(params.WorkSharesThresholdDiff))) + currentHeader := hc.CurrentHeader() + if currentHeader != nil && currentHeader.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + return (maxEoaInBlock * 2) / uint64(math.Pow(2, float64(params.OldWorkSharesThresholdDiff))) + } else { + return (maxEoaInBlock * 2) / uint64(math.Pow(2, float64(params.NewWorkSharesThresholdDiff))) + } } func (hc *HeaderChain) Database() ethdb.Database { diff --git a/params/protocol_params.go b/params/protocol_params.go index a3bca06670..4d9b9ce654 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -175,7 +175,8 @@ var ( DifficultyAdjustmentFactor int64 = 40 // This is the factor that divides the log of the change in the difficulty MinQuaiConversionAmount = new(big.Int).Mul(big.NewInt(10000000000), big.NewInt(GWei)) // 0.000000001 Quai MaxWorkShareCount = 16 - WorkSharesThresholdDiff = 3 // Number of bits lower than the target that the default consensus engine uses + OldWorkSharesThresholdDiff = 3 // Number of bits lower than the target that the default consensus engine uses + NewWorkSharesThresholdDiff = 4 // Number of bits lower than the target that the default consensus engine uses WorkSharesInclusionDepth = 3 // Number of blocks upto which the work shares can be referenced and this is protocol enforced LockupByteToBlockDepth = make(map[uint8]uint64) LockupByteToRewardsRatio = make(map[uint8]*big.Int) From cfb60bb5561367525d90043909876952a14e324e Mon Sep 17 00:00:00 2001 From: gop Date: Fri, 15 Nov 2024 10:49:46 -0600 Subject: [PATCH 9/9] Paying the miner the conversion fee after the fork --- core/state_processor.go | 61 +++++++++++++++++++++++++++++++---------- core/worker.go | 55 +++++++++++++++++++++++++------------ 2 files changed, 83 insertions(+), 33 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index 281416a2cc..9c42f219f3 100644 --- 
a/core/state_processor.go +++ b/core/state_processor.go @@ -40,10 +40,10 @@ import ( "github.com/dominant-strategies/go-quai/core/types" "github.com/dominant-strategies/go-quai/core/vm" "github.com/dominant-strategies/go-quai/crypto" + "github.com/dominant-strategies/go-quai/crypto/multiset" "github.com/dominant-strategies/go-quai/ethdb" "github.com/dominant-strategies/go-quai/event" "github.com/dominant-strategies/go-quai/log" - "github.com/dominant-strategies/go-quai/crypto/multiset" "github.com/dominant-strategies/go-quai/params" "github.com/dominant-strategies/go-quai/trie" ) @@ -1210,12 +1210,25 @@ func ValidateQiTxOutputsAndSignature(tx *types.Transaction, chain ChainContext, if currentHeader.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 && totalConvertQitOut.Cmp(types.Denominations[params.MinQiConversionDenomination]) < 0 { return nil, fmt.Errorf("tx %032x emits convert UTXO with value %d less than minimum conversion denomination", tx.Hash(), totalConvertQitOut.Uint64()) } + + if currentHeader.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 { + // Since this transaction contains a conversion, check if the required conversion gas is paid + // The user must pay this to the miner now, but it is only added to the block gas limit when the ETX is played in the destination + requiredGas += params.QiToQuaiConversionGas + minimumFeeInQuai = new(big.Int).Mul(new(big.Int).SetUint64(requiredGas), currentHeader.BaseFee()) + if txFeeInQuai.Cmp(minimumFeeInQuai) < 0 { + return nil, fmt.Errorf("tx %032x has insufficient fee for base fee * gas, have %d want %d", tx.Hash(), txFeeInQit.Uint64(), minimumFeeInQuai.Uint64()) + } + } ETXPCount++ if ETXPCount > etxPLimit { return nil, fmt.Errorf("tx [%v] emits too many cross-prime ETXs for block. 
emitted: %d, limit: %d", tx.Hash().Hex(), ETXPCount, etxPLimit) } usedGas += params.ETXGas - txFeeInQit.Sub(txFeeInQit, txFeeInQit) // Fee goes entirely to gas to pay for conversion + + if currentHeader.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + txFeeInQit.Sub(txFeeInQit, txFeeInQit) // Fee goes entirely to gas to pay for conversion + } } if usedGas > currentHeader.GasLimit() { @@ -1448,25 +1461,43 @@ func ProcessQiTx(tx *types.Transaction, chain ChainContext, checkSig bool, isFir if currentHeader.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 && totalConvertQitOut.Cmp(types.Denominations[params.MinQiConversionDenomination]) < 0 { return nil, nil, nil, fmt.Errorf("tx %032x emits convert UTXO with value %d less than minimum conversion denomination", tx.Hash(), totalConvertQitOut.Uint64()), nil } - // Since this transaction contains a conversion, the rest of the tx gas is given to conversion - remainingTxFeeInQuai := misc.QiToQuai(parent, txFeeInQit) - // Fee is basefee * gas, so gas remaining is fee remaining / basefee - remainingGas := new(big.Int).Div(remainingTxFeeInQuai, currentHeader.BaseFee()) - if remainingGas.Uint64() > (currentHeader.GasLimit() / params.MinimumEtxGasDivisor) { - // Limit ETX gas to max ETX gas limit (the rest is burned) - remainingGas = new(big.Int).SetUint64(currentHeader.GasLimit() / params.MinimumEtxGasDivisor) - } - ETXPCount++ - if ETXPCount > *etxPLimit { - return nil, nil, nil, fmt.Errorf("tx [%v] emits too many cross-prime ETXs for block. 
emitted: %d, limit: %d", tx.Hash().Hex(), ETXPCount, etxPLimit), nil + var etxInner types.ExternalTx + if currentHeader.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + // Since this transaction contains a conversion, the rest of the tx gas is given to conversion + remainingTxFeeInQuai := misc.QiToQuai(parent, txFeeInQit) + // Fee is basefee * gas, so gas remaining is fee remaining / basefee + remainingGas := new(big.Int).Div(remainingTxFeeInQuai, currentHeader.BaseFee()) + if remainingGas.Uint64() > (currentHeader.GasLimit() / params.MinimumEtxGasDivisor) { + // Limit ETX gas to max ETX gas limit (the rest is burned) + remainingGas = new(big.Int).SetUint64(currentHeader.GasLimit() / params.MinimumEtxGasDivisor) + } + ETXPCount++ + if ETXPCount > *etxPLimit { + return nil, nil, nil, fmt.Errorf("tx [%v] emits too many cross-prime ETXs for block. emitted: %d, limit: %d", tx.Hash().Hex(), ETXPCount, etxPLimit), nil + } + etxInner = types.ExternalTx{Value: totalConvertQitOut, To: &convertAddress, Sender: common.ZeroAddress(location), EtxType: types.ConversionType, OriginatingTxHash: tx.Hash(), Gas: remainingGas.Uint64()} // Value is in Qits not Denomination + } else { + // Since this transaction contains a conversion, check if the required conversion gas is paid + // The user must pay this to the miner now, but it is only added to the block gas limit when the ETX is played in the destination + requiredGas += params.QiToQuaiConversionGas + minimumFeeInQuai = new(big.Int).Mul(new(big.Int).SetUint64(requiredGas), currentHeader.BaseFee()) + if txFeeInQuai.Cmp(minimumFeeInQuai) < 0 { + return nil, nil, nil, fmt.Errorf("tx %032x has insufficient fee for base fee * gas: %d, have %d want %d", tx.Hash(), requiredGas, txFeeInQit.Uint64(), minimumFeeInQuai.Uint64()), nil + } + ETXPCount++ + if ETXPCount > *etxPLimit { + return nil, nil, nil, fmt.Errorf("tx [%v] emits too many cross-prime ETXs for block. 
emitted: %d, limit: %d", tx.Hash().Hex(), ETXPCount, etxPLimit), nil + } + etxInner = types.ExternalTx{Value: totalConvertQitOut, To: &convertAddress, Sender: common.ZeroAddress(location), EtxType: types.ConversionType, OriginatingTxHash: tx.Hash(), Gas: 0} // Value is in Qits not Denomination } - etxInner := types.ExternalTx{Value: totalConvertQitOut, To: &convertAddress, Sender: common.ZeroAddress(location), EtxType: types.ConversionType, OriginatingTxHash: tx.Hash(), Gas: remainingGas.Uint64()} // Value is in Qits not Denomination *usedGas += params.ETXGas if err := gp.SubGas(params.ETXGas); err != nil { return nil, nil, nil, err, nil } etxs = append(etxs, &etxInner) - txFeeInQit.Sub(txFeeInQit, txFeeInQit) // Fee goes entirely to gas to pay for conversion + if currentHeader.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + txFeeInQit.Sub(txFeeInQit, txFeeInQit) // Fee goes entirely to gas to pay for conversion + } } elapsedTime = time.Since(stepStart) stepTimings["Fee Verification"] = elapsedTime diff --git a/core/worker.go b/core/worker.go index 4850576325..4b2a14141e 100644 --- a/core/worker.go +++ b/core/worker.go @@ -2187,29 +2187,48 @@ func (w *worker) processQiTx(tx *types.Transaction, env *environment, primeTermi if env.wo.NumberU64(common.ZONE_CTX) >= params.GoldenAgeForkNumberV2 && totalConvertQitOut.Cmp(types.Denominations[params.MinQiConversionDenomination]) < 0 { return fmt.Errorf("tx %032x emits convert UTXO with value %d less than minimum conversion denomination", tx.Hash(), totalConvertQitOut.Uint64()) } - // Since this transaction contains a conversion, the rest of the tx gas is given to conversion - remainingTxFeeInQuai := misc.QiToQuai(parent, txFeeInQit) - // Fee is basefee * gas, so gas remaining is fee remaining / basefee - remainingGas := new(big.Int).Div(remainingTxFeeInQuai, env.wo.BaseFee()) - if remainingGas.Uint64() > (env.wo.GasLimit() / params.MinimumEtxGasDivisor) { - // Limit ETX gas to max ETX gas limit (the rest is 
burned) - remainingGas = new(big.Int).SetUint64(env.wo.GasLimit() / params.MinimumEtxGasDivisor) - } - if remainingGas.Uint64() < params.TxGas { - // Minimum gas for ETX is TxGas - return fmt.Errorf("tx %032x has insufficient remaining gas for conversion ETX, have %d want %d", tx.Hash(), remainingGas.Uint64(), params.TxGas) - } - ETXPCount++ // conversion is technically a cross-prime ETX - if ETXPCount > env.etxPLimit { - return fmt.Errorf("tx [%v] emits too many cross-prime ETXs for block. emitted: %d, limit: %d", tx.Hash().Hex(), ETXPCount, env.etxPLimit) - } - etxInner := types.ExternalTx{Value: totalConvertQitOut, To: &convertAddress, Sender: common.ZeroAddress(location), EtxType: types.ConversionType, OriginatingTxHash: tx.Hash(), Gas: remainingGas.Uint64()} // Value is in Qits not Denomination + var etxInner types.ExternalTx + if env.wo.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + // Since this transaction contains a conversion, the rest of the tx gas is given to conversion + remainingTxFeeInQuai := misc.QiToQuai(parent, txFeeInQit) + // Fee is basefee * gas, so gas remaining is fee remaining / basefee + remainingGas := new(big.Int).Div(remainingTxFeeInQuai, env.wo.BaseFee()) + if remainingGas.Uint64() > (env.wo.GasLimit() / params.MinimumEtxGasDivisor) { + // Limit ETX gas to max ETX gas limit (the rest is burned) + remainingGas = new(big.Int).SetUint64(env.wo.GasLimit() / params.MinimumEtxGasDivisor) + } + if remainingGas.Uint64() < params.TxGas { + // Minimum gas for ETX is TxGas + return fmt.Errorf("tx %032x has insufficient remaining gas for conversion ETX, have %d want %d", tx.Hash(), remainingGas.Uint64(), params.TxGas) + } + ETXPCount++ // conversion is technically a cross-prime ETX + if ETXPCount > env.etxPLimit { + return fmt.Errorf("tx [%v] emits too many cross-prime ETXs for block. 
emitted: %d, limit: %d", tx.Hash().Hex(), ETXPCount, env.etxPLimit) + } + etxInner = types.ExternalTx{Value: totalConvertQitOut, To: &convertAddress, Sender: common.ZeroAddress(location), EtxType: types.ConversionType, OriginatingTxHash: tx.Hash(), Gas: remainingGas.Uint64()} // Value is in Qits not Denomination + } else { + // Since this transaction contains a conversion, check if the required conversion gas is paid + // The user must pay this to the miner now, but it is only added to the block gas limit when the ETX is played in the destination + requiredGas += params.QiToQuaiConversionGas + minimumFeeInQuai = new(big.Int).Mul(new(big.Int).SetUint64(requiredGas), env.wo.BaseFee()) + if txFeeInQuai.Cmp(minimumFeeInQuai) < 0 { + return fmt.Errorf("tx %032x has insufficient fee for base fee * gas, have %d want %d", tx.Hash(), txFeeInQit.Uint64(), minimumFeeInQuai.Uint64()) + } + ETXPCount++ // conversion is technically a cross-prime ETX + if ETXPCount > env.etxPLimit { + return fmt.Errorf("tx [%v] emits too many cross-prime ETXs for block. emitted: %d, limit: %d", tx.Hash().Hex(), ETXPCount, env.etxPLimit) + } + // Value is in Qits not Denomination + etxInner = types.ExternalTx{Value: totalConvertQitOut, To: &convertAddress, Sender: common.ZeroAddress(location), EtxType: types.ConversionType, OriginatingTxHash: tx.Hash(), Gas: 0} // Conversion gas is paid from the converted Quai balance (for new account creation, when redeemed) + } gasUsed += params.ETXGas if err := env.gasPool.SubGas(params.ETXGas); err != nil { return err } etxs = append(etxs, &etxInner) - txFeeInQit.Sub(txFeeInQit, txFeeInQit) // Fee goes entirely to gas to pay for conversion + if env.wo.NumberU64(common.ZONE_CTX) < params.GoldenAgeForkNumberV2 { + txFeeInQit.Sub(txFeeInQit, txFeeInQit) // Fee goes entirely to gas to pay for conversion + } } env.wo.Header().SetGasUsed(gasUsed) env.etxRLimit -= ETXRCount