From 34aa1e7b35b46542448f993c79626a3f4ce15638 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Tue, 29 Aug 2023 19:06:30 +0200 Subject: [PATCH 01/25] fix typo --- CodingConventions.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CodingConventions.md b/CodingConventions.md index 8fcd1545f78..d1888cea867 100644 --- a/CodingConventions.md +++ b/CodingConventions.md @@ -102,7 +102,7 @@ happy path is either Therefore, changing the set of specified sentinel errors is generally considered a breaking API change. -2. **All errors beyond the specified, benign sentinel errors ere considered unexpected failures, i.e. a symptom for potential state corruption.** +2. **All errors beyond the specified, benign sentinel errors are considered unexpected failures, i.e. a symptom of potential state corruption.** * We employ a fundamental principle of [High Assurance Software Engineering](https://www.researchgate.net/publication/228563190_High_Assurance_Software_Development), where we treat everything beyond the known benign errors as critical failures. In unexpected failure cases, we assume that the vertex's in-memory state has been broken and proper functioning is no longer guaranteed. The only safe route of recovery is to restart the vertex from a previously persisted, safe state. From 8dfdd2251372d1c00ee472b3b501c3c846ccfc41 Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Tue, 29 Aug 2023 19:08:03 +0200 Subject: [PATCH 02/25] fix typos --- flips/network-api.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flips/network-api.md b/flips/network-api.md index b4307f1936a..a2caa57f54d 100644 --- a/flips/network-api.md +++ b/flips/network-api.md @@ -78,9 +78,9 @@ When the message is dequeued, the engine should check the `Context` to see wheth These can be combined into a [single context](https://github.com/teivah/onecontext) which can be used by the message processing business logic, so that the processing can be cancelled either by the network or by the engine. This will allow us to deprecate [`engine.Unit`](https://github.com/onflow/flow-go/blob/master/engine/unit.go), which uses a single `Context` for the entire engine. -There are certain types of messages (e.g block proposals) which may transit between the private and public networks via relay nodes (e.g Access Nodes). Libp2p's [default message ID function](https://github.com/libp2p/go-libp2p-pubsub/blob/0c7092d1f50091ae88407ba93103ac5868da3d0a/pubsub.go#L1040-L1043) will treat a message originating from one network, relayed to the other network by `n` distinct relay nodes, as `n` distinct messages, causing unnacceptable message duplification / traffic amplification. In order to prevent this, we will need to define a [custom message ID function](https://pkg.go.dev/github.com/libp2p/go-libp2p-pubsub#WithMessageIdFn) which returns the hash of the message [`Payload`](https://github.com/onflow/flow-go/blob/698c77460bc33d1a8ee8a154f7fe4877bc518a02/network/message/message.proto#L13). +There are certain types of messages (e.g block proposals) which may transit between the private and public networks via relay nodes (e.g Access Nodes). 
Libp2p's [default message ID function](https://github.com/libp2p/go-libp2p-pubsub/blob/0c7092d1f50091ae88407ba93103ac5868da3d0a/pubsub.go#L1040-L1043) will treat a message originating from one network, relayed to the other network by `n` distinct relay nodes, as `n` distinct messages, causing unacceptable message duplication / traffic amplification. In order to prevent this, we will need to define a [custom message ID function](https://pkg.go.dev/github.com/libp2p/go-libp2p-pubsub#WithMessageIdFn) which returns the hash of the message [`Payload`](https://github.com/onflow/flow-go/blob/698c77460bc33d1a8ee8a154f7fe4877bc518a02/network/message/message.proto#L13).

-In order to avoid making the message ID function deserialize the `Message` to access the `Payload`, we need to remove all other fields from the `Message` protobuf so that the message ID function can simply take the hash of the the pubsub [`Data`](https://github.com/libp2p/go-libp2p-pubsub/blob/0c7092d1f50091ae88407ba93103ac5868da3d0a/pb/rpc.pb.go#L145) field without needing to do any deserialization.
+In order to avoid making the message ID function deserialize the `Message` to access the `Payload`, we need to remove all other fields from the `Message` protobuf so that the message ID function can simply take the hash of the pubsub [`Data`](https://github.com/libp2p/go-libp2p-pubsub/blob/0c7092d1f50091ae88407ba93103ac5868da3d0a/pb/rpc.pb.go#L145) field without needing to do any deserialization.

The `Multicast` implementation will need to be changed to make direct connections to the target peers instead of sending messages with a `TargetIDs` field via gossip.

@@ -90,4 +90,4 @@ The `Multicast` implementation will need to be changed to make direct connection

- Since existing calls to `Multicast` only target 3 peers, changing the implementation to use direct connections instead of gossip will reduce traffic on the network and make it more efficient.
- While `engine.Unit` provides some useful functionalities, it also uses the anti-pattern of [storing a `Context` inside a struct](https://github.com/onflow/flow-go/blob/b50f0ffe054103a82e4aa9e0c9e4610c2cbf2cc9/engine/unit.go#L117), something which is [specifically advised against](https://pkg.go.dev/context#:~:text=Do%20not%20store%20Contexts%20inside%20a%20struct%20type%3B%20instead%2C%20pass%20a%20Context%20explicitly%20to%20each%20function%20that%20needs%20it.%20The%20Context%20should%20be%20the%20first%20parameter%2C%20typically%20named%20ctx%3A) by [the developers of Go](https://go.dev/blog/context-and-structs#TOC_2.). Here is an [example](https://go.dev/blog/context-and-structs#:~:text=Storing%20context%20in%20structs%20leads%20to%20confusion) illustrating some of the problems with this approach.

-## Implementation (TODO) \ No newline at end of file
+## Implementation (TODO)

From 8cacc173000b34a1c93b50a2c3e64e73399a05af Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Tue, 29 Aug 2023 19:08:47 +0200 Subject: [PATCH 03/25] fix typo --- state/fork/Readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/state/fork/Readme.md b/state/fork/Readme.md index 09938f46bf0..3054d2d4bd9 100644 --- a/state/fork/Readme.md +++ b/state/fork/Readme.md @@ -9,7 +9,7 @@ The traversal the walks `head <--> lowestBlock` (in either direction). There are a variety of ways to precisely specify `head` and `lowestBlock`: * At least one block, `head` or `lowestBlock`, must be specified by its ID to unambiguously identify the fork that should be traversed.
- * The other block an either be specified by ID or height.
 + * The other block can either be specified by ID or height.
 * If both `head` and `lowestBlock` are specified by their ID, they must both be on the same fork.

From 0817a784bd73f6aa3b68ce9720a1ef8f254a8d1d Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 6 Sep 2023 12:31:54 -0700 Subject: [PATCH 04/25] clean up ingestion engine by removing unused dependencies --- cmd/execution_builder.go | 3 --- engine/execution/ingestion/engine.go | 9 --------- engine/execution/ingestion/engine_test.go | 13 ------------- engine/testutil/nodes.go | 3 --- 4 files changed, 28 deletions(-)

diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index 01d22346437..4224203205e 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -874,9 +874,6 @@ func (exeNode *ExecutionNode) LoadIngestionEngine( node.Storage.Headers, node.Storage.Blocks, node.Storage.Collections, - exeNode.events, - exeNode.serviceEvents, - exeNode.txResults, exeNode.computationManager, exeNode.providerEngine, exeNode.executionState, diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 53ed58c99c6..9a4dbf2611a 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -46,9 +46,6 @@ type Engine struct { headers storage.Headers // see comments on getHeaderByHeight for why we need it blocks storage.Blocks collections storage.Collections - events storage.Events - serviceEvents storage.ServiceEvents - transactionResults storage.TransactionResults computationManager computation.ComputationManager providerEngine provider.ProviderEngine mempool *Mempool @@ -81,9 +78,6 @@ func New( headers storage.Headers, blocks storage.Blocks, collections storage.Collections, - events storage.Events, - serviceEvents storage.ServiceEvents, - transactionResults storage.TransactionResults, executionEngine computation.ComputationManager, providerEngine provider.ProviderEngine, execState state.ExecutionState, @@ -109,9 +103,6 @@ func New( headers: headers, blocks: blocks, collections: collections, - events: events, - serviceEvents: serviceEvents, - transactionResults: transactionResults, computationManager: executionEngine, providerEngine: providerEngine, mempool: mempool, diff --git a/engine/execution/ingestion/engine_test.go b/engine/execution/ingestion/engine_test.go index 541063b50a6..4a1cbadb02d 100644 --- a/engine/execution/ingestion/engine_test.go +++ b/engine/execution/ingestion/engine_test.go @@ -154,9 +154,6 @@ func runWithEngine(t *testing.T, f func(testingContext)) { blocks := storage.NewMockBlocks(ctrl) payloads := storage.NewMockPayloads(ctrl) collections := storage.NewMockCollections(ctrl) - events := storage.NewMockEvents(ctrl) - serviceEvents := storage.NewMockServiceEvents(ctrl) - txResults := storage.NewMockTransactionResults(ctrl) computationManager := new(computation.ComputationManager) providerEngine := new(provider.ProviderEngine) @@ -188,7 +185,6 @@ func runWithEngine(t *testing.T, f func(testingContext)) { return identity }, nil) - txResults.EXPECT().BatchStore(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() payloads.EXPECT().Store(gomock.Any(), gomock.Any()).AnyTimes() log := unittest.Logger() @@ -229,9 +225,6 @@ func runWithEngine(t *testing.T, f func(testingContext)) { headers, blocks, collections, - events, - serviceEvents, - txResults, computationManager, providerEngine, executionState, @@ -1507,7 +1500,6 @@ func newIngestionEngine(t *testing.T, ps
*mocks.ProtocolState, es *mockExecution headers := storage.NewMockHeaders(ctrl) blocks := storage.NewMockBlocks(ctrl) collections := storage.NewMockCollections(ctrl) - events := storage.NewMockEvents(ctrl) - serviceEvents := storage.NewMockServiceEvents(ctrl) - txResults := storage.NewMockTransactionResults(ctrl) computationManager := new(computation.ComputationManager) providerEngine := new(provider.ProviderEngine) @@ -1529,9 +1519,6 @@ func newIngestionEngine(t *testing.T, ps *mocks.ProtocolState, es *mockExecution headers, blocks, collections, - events, - serviceEvents, - txResults, computationManager, providerEngine, es, diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index 4e38cefc458..9ae2f42175f 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -720,9 +720,6 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit node.Headers, node.Blocks, collectionsStorage, - eventsStorage, - serviceEventsStorage, - txResultStorage, computationEngine, pusherEngine, execState, From 535fd29aaeb8101711dbac1e17879d32043098d7 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 15 Aug 2023 14:46:53 -0700 Subject: [PATCH 05/25] add cmd checkpoint trie stats --- cmd/util/cmd/checkpoint-trie-stats/cmd.go | 110 ++++++++++++++++++++++ ledger/complete/mtrie/trie/trie.go | 28 ++++++ 2 files changed, 138 insertions(+) create mode 100644 cmd/util/cmd/checkpoint-trie-stats/cmd.go diff --git a/cmd/util/cmd/checkpoint-trie-stats/cmd.go b/cmd/util/cmd/checkpoint-trie-stats/cmd.go new file mode 100644 index 00000000000..7958fa73103 --- /dev/null +++ b/cmd/util/cmd/checkpoint-trie-stats/cmd.go @@ -0,0 +1,110 @@ +package checkpoint_trie_stats + +import ( + "errors" + "fmt" + + "github.com/onflow/flow-go/ledger/complete/mtrie/node" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" + "github.com/onflow/flow-go/ledger/complete/wal" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" +) + +var ( + flagCheckpoint string + flagTrieIndex int +) + +var Cmd = &cobra.Command{ + Use: "checkpoint-trie-stats", + Short: "List the trie node count by types in a checkpoint, show total payload size", + Run: run, +} + +func init() { + + Cmd.Flags().StringVar(&flagCheckpoint, "checkpoint", "", + "checkpoint file to read") + _ = Cmd.MarkFlagRequired("checkpoint") + Cmd.Flags().IntVar(&flagTrieIndex, "trie-index", 0, "trie index to read, 0 being the first trie, -1 is the last trie") + +} + +func run(*cobra.Command, []string) { + + log.Info().Msgf("loading checkpoint %v, reading %v-th trie", flagCheckpoint, flagTrieIndex) + res, err := scanCheckpoint(flagCheckpoint, flagTrieIndex, log.Logger) + if err != nil { + log.Fatal().Err(err).Msg("fail to scan checkpoint") + } + log.Info(). + Str("TrieRootHash", res.trieRootHash). + Int("InterimNodeCount", res.interimNodeCount). + Int("LeafNodeCount", res.leafNodeCount). + Int("TotalPayloadSize", res.totalPayloadSize). 
+ Msgf("successfully scanned checkpoint %v", flagCheckpoint) +} + +type result struct { + trieRootHash string + interimNodeCount int + leafNodeCount int + totalPayloadSize int +} + +func readTrie(tries []*trie.MTrie, index int) (*trie.MTrie, error) { + if len(tries) == 0 { + return nil, errors.New("No tries available") + } + + if index < -len(tries) || index >= len(tries) { + return nil, fmt.Errorf("index %d out of range", index) + } + + if index < 0 { + return tries[len(tries)+index], nil + } + + return tries[index], nil +} + +func scanCheckpoint(checkpoint string, trieIndex int, log zerolog.Logger) (result, error) { + tries, err := wal.LoadCheckpoint(flagCheckpoint, &log) + if err != nil { + return result{}, fmt.Errorf("error while loading checkpoint: %w", err) + } + + log.Info().Msgf("checkpoint loaded, total tries: %v", len(tries)) + + t, err := readTrie(tries, trieIndex) + if err != nil { + return result{}, fmt.Errorf("error while reading trie: %w", err) + } + + log.Info().Msgf("trie loaded, root hash: %v", t.RootHash()) + + res := &result{ + trieRootHash: t.RootHash().String(), + interimNodeCount: 0, + leafNodeCount: 0, + totalPayloadSize: 0, + } + processNode := func(n *node.Node) error { + if n.IsLeaf() { + res.leafNodeCount++ + res.totalPayloadSize += n.Payload().Size() + } else { + res.interimNodeCount++ + } + return nil + } + + err = trie.TraverseNodes(t, processNode) + if err != nil { + return result{}, fmt.Errorf("fail to traverse the trie: %w", err) + } + + return *res, nil +} diff --git a/ledger/complete/mtrie/trie/trie.go b/ledger/complete/mtrie/trie/trie.go index b2ec3106f5d..7799c001104 100644 --- a/ledger/complete/mtrie/trie/trie.go +++ b/ledger/complete/mtrie/trie/trie.go @@ -831,3 +831,31 @@ func minInt(a, b int) int { } return b } + +// TraverseNodes traverses all nodes of the trie in DFS order +func TraverseNodes(trie *MTrie, processNode func(*node.Node) error) error { + return traverseRecursive(trie.root, processNode) +} + +func traverseRecursive(n *node.Node, processNode func(*node.Node) error) error { + if n == nil { + return nil + } + + err := processNode(n) + if err != nil { + return err + } + + err = traverseRecursive(n.LeftChild(), processNode) + if err != nil { + return err + } + + err = traverseRecursive(n.RightChild(), processNode) + if err != nil { + return err + } + + return nil +} From 7637e7963dedfb64bf36af11f7ea758fcb09018f Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Thu, 17 Aug 2023 09:30:58 -0700 Subject: [PATCH 06/25] Apply suggestions from code review Co-authored-by: Janez Podhostnik <67895329+janezpodhostnik@users.noreply.github.com> --- cmd/util/cmd/checkpoint-trie-stats/cmd.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/util/cmd/checkpoint-trie-stats/cmd.go b/cmd/util/cmd/checkpoint-trie-stats/cmd.go index 7958fa73103..d17f5bce4c2 100644 --- a/cmd/util/cmd/checkpoint-trie-stats/cmd.go +++ b/cmd/util/cmd/checkpoint-trie-stats/cmd.go @@ -76,7 +76,9 @@ func scanCheckpoint(checkpoint string, trieIndex int, log zerolog.Logger) (resul return result{}, fmt.Errorf("error while loading checkpoint: %w", err) } - log.Info().Msgf("checkpoint loaded, total tries: %v", len(tries)) + log.Info(). + Int("total_tries", len(tries)). 
+ Msg("checkpoint loaded") t, err := readTrie(tries, trieIndex) if err != nil { From 6d0f8390cd086e45f1d80546ac681ca4f0a5f6d2 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 17 Aug 2023 09:31:13 -0700 Subject: [PATCH 07/25] fix linting --- cmd/util/cmd/checkpoint-trie-stats/cmd.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/util/cmd/checkpoint-trie-stats/cmd.go b/cmd/util/cmd/checkpoint-trie-stats/cmd.go index d17f5bce4c2..08b14c39a1c 100644 --- a/cmd/util/cmd/checkpoint-trie-stats/cmd.go +++ b/cmd/util/cmd/checkpoint-trie-stats/cmd.go @@ -4,12 +4,13 @@ import ( "errors" "fmt" - "github.com/onflow/flow-go/ledger/complete/mtrie/node" - "github.com/onflow/flow-go/ledger/complete/mtrie/trie" - "github.com/onflow/flow-go/ledger/complete/wal" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" + + "github.com/onflow/flow-go/ledger/complete/mtrie/node" + "github.com/onflow/flow-go/ledger/complete/mtrie/trie" + "github.com/onflow/flow-go/ledger/complete/wal" ) var ( From b91ac2fdfce5d9135258f7aeb8bce75fd751538e Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 7 Sep 2023 10:32:50 -0700 Subject: [PATCH 08/25] fix lint --- cmd/util/cmd/checkpoint-trie-stats/cmd.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/util/cmd/checkpoint-trie-stats/cmd.go b/cmd/util/cmd/checkpoint-trie-stats/cmd.go index 08b14c39a1c..62124e9c285 100644 --- a/cmd/util/cmd/checkpoint-trie-stats/cmd.go +++ b/cmd/util/cmd/checkpoint-trie-stats/cmd.go @@ -78,8 +78,8 @@ func scanCheckpoint(checkpoint string, trieIndex int, log zerolog.Logger) (resul } log.Info(). - Int("total_tries", len(tries)). - Msg("checkpoint loaded") + Int("total_tries", len(tries)). + Msg("checkpoint loaded") t, err := readTrie(tries, trieIndex) if err != nil { From 2d3c54cab790557b6bd12f4aed0ec37196cd3490 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 8 Sep 2023 08:55:07 -0700 Subject: [PATCH 09/25] fix linting --- cmd/util/cmd/checkpoint-trie-stats/cmd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/checkpoint-trie-stats/cmd.go b/cmd/util/cmd/checkpoint-trie-stats/cmd.go index 62124e9c285..327a4cf037b 100644 --- a/cmd/util/cmd/checkpoint-trie-stats/cmd.go +++ b/cmd/util/cmd/checkpoint-trie-stats/cmd.go @@ -72,7 +72,7 @@ func readTrie(tries []*trie.MTrie, index int) (*trie.MTrie, error) { } func scanCheckpoint(checkpoint string, trieIndex int, log zerolog.Logger) (result, error) { - tries, err := wal.LoadCheckpoint(flagCheckpoint, &log) + tries, err := wal.LoadCheckpoint(flagCheckpoint, log) if err != nil { return result{}, fmt.Errorf("error while loading checkpoint: %w", err) } From c79f19ceba8af92b5fa8454a36278fd73a6e07b8 Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 11:51:56 -0700 Subject: [PATCH 10/25] adds peer.ID cache --- utils/logging/internal/peerIdCache.go | 62 +++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 utils/logging/internal/peerIdCache.go diff --git a/utils/logging/internal/peerIdCache.go b/utils/logging/internal/peerIdCache.go new file mode 100644 index 00000000000..00ca1520d0b --- /dev/null +++ b/utils/logging/internal/peerIdCache.go @@ -0,0 +1,62 @@ +package internal + +import ( + "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" + herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" + 
"github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" + "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/module/metrics" +) + +var _ flow.Entity = (*peerIdCacheEntry)(nil) + +type PeerIdCache struct { + peerCache *stdmap.Backend +} + +func NewPeerIdCache(size uint32) *PeerIdCache { + return &PeerIdCache{ + peerCache: stdmap.NewBackend( + stdmap.WithBackData( + herocache.NewCache( + size, + herocache.DefaultOversizeFactor, + heropool.LRUEjection, + zerolog.Nop(), + metrics.NewNoopCollector()))), + } +} + +func (p *PeerIdCache) PeerIdString(pid peer.ID) string { + id := flow.MakeIDFromFingerPrint([]byte(pid)) + pidEntity, ok := p.peerCache.ByID(id) + if !ok { + return pidEntity.(peerIdCacheEntry).Str + } + pidEntity = peerIdCacheEntry{ + id: id, + Pid: pid, + Str: pid.String(), + } + + p.peerCache.Add(pidEntity) + + return pidEntity.(peerIdCacheEntry).Str +} + +type peerIdCacheEntry struct { + id flow.Identifier // cache the id for fast lookup + Pid peer.ID // peer id + Str string // base58 encoded peer id string +} + +func (p peerIdCacheEntry) ID() flow.Identifier { + return p.id +} + +func (p peerIdCacheEntry) Checksum() flow.Identifier { + return p.id +} From c16be25894db37362fd2eeb2dcbf4d5cedb323a6 Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 12:05:06 -0700 Subject: [PATCH 11/25] moves to p2plogging and write tests --- .../p2p/p2plogging}/internal/peerIdCache.go | 3 +- .../p2plogging/internal/peerIdCache_test.go | 40 +++++++++++++++++++ network/p2p/p2plogging/logging.go | 23 +++++++++++ 3 files changed, 65 insertions(+), 1 deletion(-) rename {utils/logging => network/p2p/p2plogging}/internal/peerIdCache.go (96%) create mode 100644 network/p2p/p2plogging/internal/peerIdCache_test.go create mode 100644 network/p2p/p2plogging/logging.go diff --git a/utils/logging/internal/peerIdCache.go b/network/p2p/p2plogging/internal/peerIdCache.go similarity index 96% rename from utils/logging/internal/peerIdCache.go rename to network/p2p/p2plogging/internal/peerIdCache.go index 00ca1520d0b..c917b91a8ed 100644 --- a/utils/logging/internal/peerIdCache.go +++ b/network/p2p/p2plogging/internal/peerIdCache.go @@ -33,7 +33,8 @@ func NewPeerIdCache(size uint32) *PeerIdCache { func (p *PeerIdCache) PeerIdString(pid peer.ID) string { id := flow.MakeIDFromFingerPrint([]byte(pid)) pidEntity, ok := p.peerCache.ByID(id) - if !ok { + if ok { + // return the cached peer id string return pidEntity.(peerIdCacheEntry).Str } pidEntity = peerIdCacheEntry{ diff --git a/network/p2p/p2plogging/internal/peerIdCache_test.go b/network/p2p/p2plogging/internal/peerIdCache_test.go new file mode 100644 index 00000000000..efac92c08c8 --- /dev/null +++ b/network/p2p/p2plogging/internal/peerIdCache_test.go @@ -0,0 +1,40 @@ +package internal_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/network/p2p/p2plogging/internal" + p2ptest "github.com/onflow/flow-go/network/p2p/test" +) + +func TestNewPeerIdCache(t *testing.T) { + cacheSize := uint32(100) + cache := internal.NewPeerIdCache(cacheSize) + assert.NotNil(t, cache) +} + +func TestPeerIdCache_PeerIdString(t *testing.T) { + cacheSize := uint32(100) + cache := internal.NewPeerIdCache(cacheSize) + + t.Run("existing peer ID", func(t *testing.T) { + pid := p2ptest.PeerIdFixture(t) + pidStr := cache.PeerIdString(pid) + + assert.NotEmpty(t, pidStr) + assert.Equal(t, pid.String(), pidStr) + }) + + t.Run("non-existing peer ID", func(t *testing.T) { + pid1 := 
p2ptest.PeerIdFixture(t) + pid2 := p2ptest.PeerIdFixture(t) + + cache.PeerIdString(pid1) + pidStr := cache.PeerIdString(pid2) + + assert.NotEmpty(t, pidStr) + assert.Equal(t, pid2.String(), pidStr) + }) +} diff --git a/network/p2p/p2plogging/logging.go b/network/p2p/p2plogging/logging.go new file mode 100644 index 00000000000..e4f2e93ad7d --- /dev/null +++ b/network/p2p/p2plogging/logging.go @@ -0,0 +1,23 @@ +package p2plogging + +import ( + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/onflow/flow-go/network/p2p/p2plogging/internal" +) + +// peerIdCache is a global cache of peer ids, it is used to avoid expensive base58 encoding of peer ids. +var peerIdCache *internal.PeerIdCache + +// init is called before the package is initialized. This is used to initialize +// the peer id cache before any other code is run, so that the cache is ready +// to use. +func init() { + peerIdCache = internal.NewPeerIdCache(10_000) +} + +// PeerId is a logger helper that returns the base58 encoded peer id string, it looks up the peer id in a cache to avoid +// expensive base58 encoding, and caches the result for future use in case of a cache miss. +func PeerId(pid peer.ID) string { + return peerIdCache.PeerIdString(pid) +} From e4fbb5acdb2f6370e270278a3d9180a1743c62ba Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 12:17:12 -0700 Subject: [PATCH 12/25] adds ejection test --- .../p2p/p2plogging/internal/peerIdCache.go | 14 +++++ .../p2plogging/internal/peerIdCache_test.go | 62 ++++++++++++++++++- 2 files changed, 74 insertions(+), 2 deletions(-) diff --git a/network/p2p/p2plogging/internal/peerIdCache.go b/network/p2p/p2plogging/internal/peerIdCache.go index c917b91a8ed..f5655a73756 100644 --- a/network/p2p/p2plogging/internal/peerIdCache.go +++ b/network/p2p/p2plogging/internal/peerIdCache.go @@ -48,6 +48,20 @@ func (p *PeerIdCache) PeerIdString(pid peer.ID) string { return pidEntity.(peerIdCacheEntry).Str } +func (p *PeerIdCache) Size() uint { + return p.peerCache.Size() +} + +func (p *PeerIdCache) ByPeerId(pid peer.ID) (peer.ID, bool) { + id := flow.MakeIDFromFingerPrint([]byte(pid)) + pidEntity, ok := p.peerCache.ByID(id) + if ok { + // return the cached peer id + return pidEntity.(peerIdCacheEntry).Pid, true + } + return "", false +} + type peerIdCacheEntry struct { id flow.Identifier // cache the id for fast lookup Pid peer.ID // peer id diff --git a/network/p2p/p2plogging/internal/peerIdCache_test.go b/network/p2p/p2plogging/internal/peerIdCache_test.go index efac92c08c8..13279276a95 100644 --- a/network/p2p/p2plogging/internal/peerIdCache_test.go +++ b/network/p2p/p2plogging/internal/peerIdCache_test.go @@ -5,6 +5,7 @@ import ( "github.com/stretchr/testify/assert" + "github.com/onflow/flow-go/network/internal/p2pfixtures" "github.com/onflow/flow-go/network/p2p/p2plogging/internal" p2ptest "github.com/onflow/flow-go/network/p2p/test" ) @@ -22,9 +23,12 @@ func TestPeerIdCache_PeerIdString(t *testing.T) { t.Run("existing peer ID", func(t *testing.T) { pid := p2ptest.PeerIdFixture(t) pidStr := cache.PeerIdString(pid) - assert.NotEmpty(t, pidStr) assert.Equal(t, pid.String(), pidStr) + + gotPid, ok := cache.ByPeerId(pid) + assert.True(t, ok, "expected pid to be in the cache") + assert.Equal(t, pid.String(), gotPid.String()) }) t.Run("non-existing peer ID", func(t *testing.T) { @@ -33,8 +37,62 @@ func TestPeerIdCache_PeerIdString(t *testing.T) { cache.PeerIdString(pid1) pidStr := cache.PeerIdString(pid2) - assert.NotEmpty(t, pidStr) assert.Equal(t, pid2.String(), pidStr) + 
+ gotPid, ok := cache.ByPeerId(pid2) + assert.True(t, ok, "expected pid to be in the cache") + assert.Equal(t, pid2.String(), gotPid.String()) + + gotPid, ok = cache.ByPeerId(pid1) + assert.True(t, ok, "expected pid to be in the cache") + assert.Equal(t, pid1.String(), gotPid.String()) }) } + +func TestPeerIdCache_EjectionScenarios(t *testing.T) { + cacheSize := uint32(3) + cache := internal.NewPeerIdCache(cacheSize) + assert.Equal(t, uint(0), cache.Size()) + + // add peer IDs to fill the cache + pid1 := p2pfixtures.PeerIdFixture(t) + pid2 := p2pfixtures.PeerIdFixture(t) + pid3 := p2pfixtures.PeerIdFixture(t) + + cache.PeerIdString(pid1) + assert.Equal(t, uint(1), cache.Size()) + cache.PeerIdString(pid2) + assert.Equal(t, uint(2), cache.Size()) + cache.PeerIdString(pid3) + assert.Equal(t, uint(3), cache.Size()) + + // check that all peer IDs are in the cache + assert.Equal(t, pid1.String(), cache.PeerIdString(pid1)) + assert.Equal(t, pid2.String(), cache.PeerIdString(pid2)) + assert.Equal(t, pid3.String(), cache.PeerIdString(pid3)) + assert.Equal(t, uint(3), cache.Size()) + + // add a new peer ID + pid4 := p2pfixtures.PeerIdFixture(t) + cache.PeerIdString(pid4) + assert.Equal(t, uint(3), cache.Size()) + + // check that pid1 is now the one that has been evicted + gotId1, ok := cache.ByPeerId(pid1) + assert.False(t, ok, "expected pid1 to be evicted") + assert.Equal(t, "", gotId1.String()) + + // confirm other peer IDs are still in the cache + gotId2, ok := cache.ByPeerId(pid2) + assert.True(t, ok, "expected pid2 to be in the cache") + assert.Equal(t, pid2.String(), gotId2.String()) + + gotId3, ok := cache.ByPeerId(pid3) + assert.True(t, ok, "expected pid3 to be in the cache") + assert.Equal(t, pid3.String(), gotId3.String()) + + gotId4, ok := cache.ByPeerId(pid4) + assert.True(t, ok, "expected pid4 to be in the cache") + assert.Equal(t, pid4.String(), gotId4.String()) +} From 30d9c3dd2de551d71d7cccf8991131ada629806e Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 12:19:09 -0700 Subject: [PATCH 13/25] adds test peer id logging --- network/p2p/p2plogging/logging_test.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 network/p2p/p2plogging/logging_test.go diff --git a/network/p2p/p2plogging/logging_test.go b/network/p2p/p2plogging/logging_test.go new file mode 100644 index 00000000000..448f65b6fb2 --- /dev/null +++ b/network/p2p/p2plogging/logging_test.go @@ -0,0 +1,18 @@ +package p2plogging_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/network/p2p/p2plogging" + p2ptest "github.com/onflow/flow-go/network/p2p/test" +) + +// TestPeerIdLogging checks the end-to-end functionality of the PeerId logger helper. +// It ensures that the PeerId logger helper returns the same string as the peer.ID.String() method. 
+func TestPeerIdLogging(t *testing.T) { + pid := p2ptest.PeerIdFixture(t) + pidStr := p2plogging.PeerId(pid) + require.Equal(t, pid.String(), pidStr) +} From 8f35c022caf78a2fa1c6340a268babce9deee513 Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 13:02:51 -0700 Subject: [PATCH 14/25] adds benchmarking --- network/p2p/p2plogging/logging_test.go | 30 ++++++++++++++++++++++++++ network/p2p/test/fixtures.go | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/network/p2p/p2plogging/logging_test.go b/network/p2p/p2plogging/logging_test.go index 448f65b6fb2..341bf476d0d 100644 --- a/network/p2p/p2plogging/logging_test.go +++ b/network/p2p/p2plogging/logging_test.go @@ -3,6 +3,7 @@ package p2plogging_test import ( "testing" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "github.com/onflow/flow-go/network/p2p/p2plogging" @@ -16,3 +17,32 @@ func TestPeerIdLogging(t *testing.T) { pidStr := p2plogging.PeerId(pid) require.Equal(t, pid.String(), pidStr) } + +// BenchmarkPeerIdString benchmarks the peer.ID.String() method. +func BenchmarkPeerIdString(b *testing.B) { + count := 100 + pids := make([]peer.ID, 0, count) + for i := 0; i < count; i++ { + pids = append(pids, p2ptest.PeerIdFixture(b)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = pids[i%count].String() + } +} + +// BenchmarkPeerIdLogging benchmarks the PeerId logger helper, which is expected to be faster than the peer.ID.String() method, +// as it caches the base58 encoded peer ID strings. +func BenchmarkPeerIdLogging(b *testing.B) { + count := 100 + pids := make([]peer.ID, 0, count) + for i := 0; i < count; i++ { + pids = append(pids, p2ptest.PeerIdFixture(b)) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = p2plogging.PeerId(pids[i%count]) + } +} diff --git a/network/p2p/test/fixtures.go b/network/p2p/test/fixtures.go index 7017a2fea1f..5483c91df7c 100644 --- a/network/p2p/test/fixtures.go +++ b/network/p2p/test/fixtures.go @@ -710,7 +710,7 @@ func EnsurePubsubMessageExchangeFromNode( // PeerIdFixture returns a random peer ID for testing. // peer ID is the identifier of a node on the libp2p network. 
-func PeerIdFixture(t *testing.T) peer.ID { +func PeerIdFixture(t testing.TB) peer.ID { buf := make([]byte, 16) n, err := rand.Read(buf) require.NoError(t, err) From d41913ead09c0a17840acded9729840a26241ae7 Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 13:24:35 -0700 Subject: [PATCH 15/25] replaces all String() instances with p2plogging.PeerID --- cmd/observer/node_builder/observer_builder.go | 3 ++- follower/follower_builder.go | 3 ++- insecure/corruptlibp2p/pubsub_adapter.go | 5 ++-- module/metrics/libp2p_resource_manager.go | 13 ++++++----- module/metrics/network.go | 3 ++- network/errors.go | 4 +++- network/internal/p2putils/utils.go | 5 ++-- network/p2p/blob/blob_service.go | 3 ++- network/p2p/cache/gossipsub_spam_records.go | 5 ++-- network/p2p/connection/connection_gater.go | 5 ++-- .../p2p/connection/connection_gater_test.go | 5 ++-- network/p2p/connection/connector.go | 3 ++- .../p2p/connection/internal/loggerNotifiee.go | 5 ++-- network/p2p/connection/peerManager.go | 4 ++-- network/p2p/dht/dht.go | 5 ++-- .../control_message_validation_inspector.go | 19 +++++++-------- network/p2p/p2pbuilder/utils.go | 7 +++--- network/p2p/p2pnet/network.go | 23 ++++++++++--------- network/p2p/p2pnode/gossipsubMetrics.go | 3 ++- network/p2p/p2pnode/libp2pNode.go | 15 ++++++------ network/p2p/p2pnode/libp2pNode_test.go | 5 ++-- network/p2p/p2pnode/protocolPeerCache.go | 4 +++- network/p2p/scoring/registry.go | 13 ++++++----- network/p2p/test/topic_validator_test.go | 5 ++-- network/p2p/tracer/gossipSubMeshTracer.go | 9 ++++---- network/p2p/tracer/gossipSubScoreTracer.go | 3 ++- .../identity_provider_translator.go | 3 ++- network/p2p/unicast/errors.go | 4 +++- network/p2p/unicast/manager.go | 5 ++-- network/p2p/unicast/stream/errors.go | 6 +++-- network/test/unicast_authorization_test.go | 15 ++++++------ .../validator/authorized_sender_validator.go | 17 +++++++------- network/validator/pubsub/topic_validator.go | 3 ++- 33 files changed, 133 insertions(+), 97 deletions(-) diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index 507de880479..ff6e5ff6da5 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -60,6 +60,7 @@ import ( "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/p2p/p2pbuilder" p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet" "github.com/onflow/flow-go/network/p2p/subscription" "github.com/onflow/flow-go/network/p2p/tracer" @@ -588,7 +589,7 @@ func (builder *ObserverServiceBuilder) InitIDProviders() { if flowID, err := builder.IDTranslator.GetFlowID(pid); err != nil { // TODO: this is an instance of "log error and continue with best effort" anti-pattern - builder.Logger.Err(err).Str("peer", pid.String()).Msg("failed to translate to Flow ID") + builder.Logger.Err(err).Str("peer", p2plogging.PeerId(pid)).Msg("failed to translate to Flow ID") } else { result = append(result, flowID) } diff --git a/follower/follower_builder.go b/follower/follower_builder.go index bf7c3a40eb8..2b32040c5b1 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -50,6 +50,7 @@ import ( "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/p2p/p2pbuilder" p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" + 
"github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet" "github.com/onflow/flow-go/network/p2p/subscription" "github.com/onflow/flow-go/network/p2p/tracer" @@ -470,7 +471,7 @@ func (builder *FollowerServiceBuilder) InitIDProviders() { if flowID, err := builder.IDTranslator.GetFlowID(pid); err != nil { // TODO: this is an instance of "log error and continue with best effort" anti-pattern - builder.Logger.Err(err).Str("peer", pid.String()).Msg("failed to translate to Flow ID") + builder.Logger.Err(err).Str("peer", p2plogging.PeerId(pid)).Msg("failed to translate to Flow ID") } else { result = append(result, flowID) } diff --git a/insecure/corruptlibp2p/pubsub_adapter.go b/insecure/corruptlibp2p/pubsub_adapter.go index f9d08eed50e..64975a18e3c 100644 --- a/insecure/corruptlibp2p/pubsub_adapter.go +++ b/insecure/corruptlibp2p/pubsub_adapter.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" ) @@ -65,7 +66,7 @@ func (c *CorruptGossipSubAdapter) RegisterTopicValidator(topic string, topicVali c.logger.Fatal(). Bool(logging.KeySuspicious, true). Str("topic", topic). - Str("origin_peer", from.String()). + Str("origin_peer", p2plogging.PeerId(from)). Str("result", fmt.Sprintf("%v", result)). Str("message_type", fmt.Sprintf("%T", message.Data)). Msgf("invalid validation result, should be a bug in the topic validator") @@ -74,7 +75,7 @@ func (c *CorruptGossipSubAdapter) RegisterTopicValidator(topic string, topicVali c.logger.Warn(). Bool(logging.KeySuspicious, true). Str("topic", topic). - Str("origin_peer", from.String()). + Str("origin_peer", p2plogging.PeerId(from)). Str("result", fmt.Sprintf("%v", result)). Str("message_type", fmt.Sprintf("%T", message.Data)). 
Msg("invalid validation result, returning reject") diff --git a/module/metrics/libp2p_resource_manager.go b/module/metrics/libp2p_resource_manager.go index 83c0c7da206..4effd90d5e5 100644 --- a/module/metrics/libp2p_resource_manager.go +++ b/module/metrics/libp2p_resource_manager.go @@ -11,6 +11,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/rs/zerolog" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" ) @@ -171,22 +172,22 @@ func (l *LibP2PResourceManagerMetrics) BlockConn(dir network.Direction, usefd bo func (l *LibP2PResourceManagerMetrics) AllowStream(p peer.ID, dir network.Direction) { l.allowStreamCount.WithLabelValues(dir.String()).Inc() - l.logger.Trace().Str("peer", p.String()).Str("direction", dir.String()).Msg("allowing stream") + l.logger.Trace().Str("peer", p2plogging.PeerId(p)).Str("direction", dir.String()).Msg("allowing stream") } func (l *LibP2PResourceManagerMetrics) BlockStream(p peer.ID, dir network.Direction) { l.blockStreamCount.WithLabelValues(dir.String()).Inc() - l.logger.Debug().Bool(logging.KeySuspicious, true).Str("peer", p.String()).Str("direction", dir.String()).Msg("blocking stream") + l.logger.Debug().Bool(logging.KeySuspicious, true).Str("peer", p2plogging.PeerId(p)).Str("direction", dir.String()).Msg("blocking stream") } func (l *LibP2PResourceManagerMetrics) AllowPeer(p peer.ID) { l.allowPeerCount.Inc() - l.logger.Trace().Str("peer", p.String()).Msg("allowing peer") + l.logger.Trace().Str("peer", p2plogging.PeerId(p)).Msg("allowing peer") } func (l *LibP2PResourceManagerMetrics) BlockPeer(p peer.ID) { l.blockPeerCount.Inc() - l.logger.Debug().Bool(logging.KeySuspicious, true).Str("peer", p.String()).Msg("blocking peer") + l.logger.Debug().Bool(logging.KeySuspicious, true).Str("peer", p2plogging.PeerId(p)).Msg("blocking peer") } func (l *LibP2PResourceManagerMetrics) AllowProtocol(proto protocol.ID) { @@ -201,7 +202,7 @@ func (l *LibP2PResourceManagerMetrics) BlockProtocol(proto protocol.ID) { func (l *LibP2PResourceManagerMetrics) BlockProtocolPeer(proto protocol.ID, p peer.ID) { l.blockProtocolPeerCount.Inc() - l.logger.Debug().Bool(logging.KeySuspicious, true).Str("protocol", string(proto)).Str("peer", p.String()).Msg("blocking protocol for peer") + l.logger.Debug().Bool(logging.KeySuspicious, true).Str("protocol", string(proto)).Str("peer", p2plogging.PeerId(p)).Msg("blocking protocol for peer") } func (l *LibP2PResourceManagerMetrics) AllowService(svc string) { @@ -216,7 +217,7 @@ func (l *LibP2PResourceManagerMetrics) BlockService(svc string) { func (l *LibP2PResourceManagerMetrics) BlockServicePeer(svc string, p peer.ID) { l.blockServicePeerCount.Inc() - l.logger.Debug().Bool(logging.KeySuspicious, true).Str("service", svc).Str("peer", p.String()).Msg("blocking service for peer") + l.logger.Debug().Bool(logging.KeySuspicious, true).Str("service", svc).Str("peer", p2plogging.PeerId(p)).Msg("blocking service for peer") } func (l *LibP2PResourceManagerMetrics) AllowMemory(size int) { diff --git a/module/metrics/network.go b/module/metrics/network.go index 311dbba9f15..af9359fef21 100644 --- a/module/metrics/network.go +++ b/module/metrics/network.go @@ -10,6 +10,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" ) @@ -359,7 +360,7 @@ func (nc *NetworkCollector) OnUnauthorizedMessage(role, msgType, topic, offense // OnRateLimitedPeer tracks the number of 
rate limited messages seen on the network. func (nc *NetworkCollector) OnRateLimitedPeer(peerID peer.ID, role, msgType, topic, reason string) { nc.logger.Warn(). - Str("peer_id", peerID.String()). + Str("peer_id", p2plogging.PeerId(peerID)). Str("role", role). Str("message_type", msgType). Str("topic", topic). diff --git a/network/errors.go b/network/errors.go index f469165fe46..5c4485324e2 100644 --- a/network/errors.go +++ b/network/errors.go @@ -5,6 +5,8 @@ import ( "fmt" "github.com/libp2p/go-libp2p/core/peer" + + "github.com/onflow/flow-go/network/p2p/p2plogging" ) var ( @@ -18,7 +20,7 @@ type ErrIllegalConnectionState struct { } func (e ErrIllegalConnectionState) Error() string { - return fmt.Sprintf("unexpected connection status to peer %s: received NotConnected status while connection list is not empty %d ", e.pid.String(), e.numOfConns) + return fmt.Sprintf("unexpected connection status to peer %s: received NotConnected status while connection list is not empty %d ", p2plogging.PeerId(e.pid), e.numOfConns) } // NewConnectionStatusErr returns a new ErrIllegalConnectionState. diff --git a/network/internal/p2putils/utils.go b/network/internal/p2putils/utils.go index 2415ca5b4c8..2325df8734a 100644 --- a/network/internal/p2putils/utils.go +++ b/network/internal/p2putils/utils.go @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network/p2p/keyutils" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/unicast/protocols" ) @@ -31,9 +32,9 @@ func FlowStream(conn network.Conn) network.Stream { func StreamLogger(log zerolog.Logger, stream network.Stream) zerolog.Logger { logger := log.With(). Str("protocol", string(stream.Protocol())). - Str("remote_peer", stream.Conn().RemotePeer().String()). + Str("remote_peer", p2plogging.PeerId(stream.Conn().RemotePeer())). Str("remote_address", stream.Conn().RemoteMultiaddr().String()). - Str("local_peer", stream.Conn().LocalPeer().String()). + Str("local_peer", p2plogging.PeerId(stream.Conn().LocalPeer())). Str("local_address", stream.Conn().LocalMultiaddr().String()).Logger() return logger } diff --git a/network/p2p/blob/blob_service.go b/network/p2p/blob/blob_service.go index 2febe968689..7f8d06e56c1 100644 --- a/network/p2p/blob/blob_service.go +++ b/network/p2p/blob/blob_service.go @@ -30,6 +30,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" ipld "github.com/ipfs/go-ipld-format" @@ -262,7 +263,7 @@ func AuthorizedRequester( return func(peerID peer.ID, _ cid.Cid) bool { lg := logger.With(). Str("component", "blob_service"). - Str("peer_id", peerID.String()). + Str("peer_id", p2plogging.PeerId(peerID)). Logger() id, ok := identityProvider.ByPeerID(peerID) diff --git a/network/p2p/cache/gossipsub_spam_records.go b/network/p2p/cache/gossipsub_spam_records.go index 61251e28bcc..265c2befbb7 100644 --- a/network/p2p/cache/gossipsub_spam_records.go +++ b/network/p2p/cache/gossipsub_spam_records.go @@ -13,6 +13,7 @@ import ( "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // GossipSubSpamRecordCache is a cache for storing the gossipsub spam records of peers. It is thread-safe. 
@@ -187,10 +188,10 @@ func (a *GossipSubSpamRecordCache) Get(peerID peer.ID) (*p2p.GossipSubSpamRecord return e }) if err != nil { - return nil, fmt.Errorf("error while applying pre-processing functions to cache record for peer %s: %w", peerID.String(), err), false + return nil, fmt.Errorf("error while applying pre-processing functions to cache record for peer %s: %w", p2plogging.PeerId(peerID), err), false } if !updated { - return nil, fmt.Errorf("could not decay cache record for peer %s", peerID.String()), false + return nil, fmt.Errorf("could not decay cache record for peer %s", p2plogging.PeerId(peerID)), false } r := record.(gossipsubSpamRecordEntity).GossipSubSpamRecord diff --git a/network/p2p/connection/connection_gater.go b/network/p2p/connection/connection_gater.go index 3603d15d227..4bcfb16c9e0 100644 --- a/network/p2p/connection/connection_gater.go +++ b/network/p2p/connection/connection_gater.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" ) @@ -71,7 +72,7 @@ func NewConnGater(log zerolog.Logger, identityProvider module.IdentityProvider, // InterceptPeerDial - a callback which allows or disallows outbound connection func (c *ConnGater) InterceptPeerDial(p peer.ID) bool { - lg := c.log.With().Str("peer_id", p.String()).Logger() + lg := c.log.With().Str("peer_id", p2plogging.PeerId(p)).Logger() disallowListCauses, disallowListed := c.disallowListOracle.IsDisallowListed(p) if disallowListed { @@ -128,7 +129,7 @@ func (c *ConnGater) InterceptSecured(dir network.Direction, p peer.ID, addr netw switch dir { case network.DirInbound: lg := c.log.With(). - Str("peer_id", p.String()). + Str("peer_id", p2plogging.PeerId(p)). Str("remote_address", addr.RemoteMultiaddr().String()). 
Logger() diff --git a/network/p2p/connection/connection_gater_test.go b/network/p2p/connection/connection_gater_test.go index 15d69fc44a7..5a2c678b15c 100644 --- a/network/p2p/connection/connection_gater_test.go +++ b/network/p2p/connection/connection_gater_test.go @@ -21,6 +21,7 @@ import ( "github.com/onflow/flow-go/network/p2p/connection" mockp2p "github.com/onflow/flow-go/network/p2p/mock" p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" + "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/unicast/stream" "github.com/onflow/flow-go/utils/unittest" @@ -42,7 +43,7 @@ func TestConnectionGating(t *testing.T) { idProvider, p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(p peer.ID) error { if !node1Peers.Has(p) { - return fmt.Errorf("id not found: %s", p.String()) + return fmt.Errorf("id not found: %s", p2plogging.PeerId(p)) } return nil }))) @@ -56,7 +57,7 @@ func TestConnectionGating(t *testing.T) { idProvider, p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(p peer.ID) error { if !node2Peers.Has(p) { - return fmt.Errorf("id not found: %s", p.String()) + return fmt.Errorf("id not found: %s", p2plogging.PeerId(p)) } return nil }))) diff --git a/network/p2p/connection/connector.go b/network/p2p/connection/connector.go index e185d38c69b..69fbb5d4359 100644 --- a/network/p2p/connection/connector.go +++ b/network/p2p/connection/connector.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/network/internal/p2putils" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" "github.com/onflow/flow-go/utils/rand" ) @@ -99,7 +100,7 @@ func (l *PeerUpdater) connectToPeers(ctx context.Context, peerIDs peer.IDSlice) for _, peerID := range peerIDs { if l.host.IsConnectedTo(peerID) { - l.log.Trace().Str("peer_id", peerID.String()).Msg("already connected to peer, skipping connection") + l.log.Trace().Str("peer_id", p2plogging.PeerId(peerID)).Msg("already connected to peer, skipping connection") continue } peerCh <- peer.AddrInfo{ID: peerID} diff --git a/network/p2p/connection/internal/loggerNotifiee.go b/network/p2p/connection/internal/loggerNotifiee.go index 9dc6fab9f75..ce49c6081a8 100644 --- a/network/p2p/connection/internal/loggerNotifiee.go +++ b/network/p2p/connection/internal/loggerNotifiee.go @@ -6,6 +6,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) type LoggerNotifiee struct { @@ -45,9 +46,9 @@ func (l *LoggerNotifiee) Disconnected(n network.Network, conn network.Conn) { func (l *LoggerNotifiee) connectionUpdateLogger(n network.Network, con network.Conn) zerolog.Logger { return l.logger.With(). - Str("remote_peer", con.RemotePeer().String()). + Str("remote_peer", p2plogging.PeerId(con.RemotePeer())). Str("remote_address", con.RemoteMultiaddr().String()). - Str("local_peer", con.LocalPeer().String()). + Str("local_peer", p2plogging.PeerId(con.LocalPeer())). Str("local_address", con.LocalMultiaddr().String()). Str("direction", con.Stat().Direction.String()). 
Int("total_connections", len(n.Conns())).Logger() diff --git a/network/p2p/connection/peerManager.go b/network/p2p/connection/peerManager.go index 11fe502a07c..d8e323813fd 100644 --- a/network/p2p/connection/peerManager.go +++ b/network/p2p/connection/peerManager.go @@ -3,7 +3,6 @@ package connection import ( "context" "fmt" - "sync" "time" @@ -13,6 +12,7 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" "github.com/onflow/flow-go/utils/rand" ) @@ -166,7 +166,7 @@ func (pm *PeerManager) SetPeersProvider(peersProvider p2p.PeersProvider) { // is disconnected immediately after being rate limited. func (pm *PeerManager) OnRateLimitedPeer(pid peer.ID, role, msgType, topic, reason string) { pm.logger.Warn(). - Str("peer_id", pid.String()). + Str("peer_id", p2plogging.PeerId(pid)). Str("role", role). Str("message_type", msgType). Str("topic", topic). diff --git a/network/p2p/dht/dht.go b/network/p2p/dht/dht.go index 459a84ea21a..930df0e2251 100644 --- a/network/p2p/dht/dht.go +++ b/network/p2p/dht/dht.go @@ -10,6 +10,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // This produces a new IPFS DHT @@ -33,12 +34,12 @@ func NewDHT(ctx context.Context, host host.Host, prefix protocol.ID, logger zero peerAddedCb := routingTable.PeerAdded routingTable.PeerRemoved = func(pid peer.ID) { peerRemovedCb(pid) - dhtLogger.Debug().Str("peer_id", pid.String()).Msg("peer removed from routing table") + dhtLogger.Debug().Str("peer_id", p2plogging.PeerId(pid)).Msg("peer removed from routing table") metrics.RoutingTablePeerRemoved() } routingTable.PeerAdded = func(pid peer.ID) { peerAddedCb(pid) - dhtLogger.Debug().Str("peer_id", pid.String()).Msg("peer added to routing table") + dhtLogger.Debug().Str("peer_id", p2plogging.PeerId(pid)).Msg("peer added to routing table") metrics.RoutingTablePeerAdded() } diff --git a/network/p2p/inspector/validation/control_message_validation_inspector.go b/network/p2p/inspector/validation/control_message_validation_inspector.go index e24ddfbe3fe..32f475de8d1 100644 --- a/network/p2p/inspector/validation/control_message_validation_inspector.go +++ b/network/p2p/inspector/validation/control_message_validation_inspector.go @@ -23,6 +23,7 @@ import ( "github.com/onflow/flow-go/network/p2p/inspector/internal/ratelimit" p2pmsg "github.com/onflow/flow-go/network/p2p/message" "github.com/onflow/flow-go/network/p2p/p2pconf" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/utils/logging" @@ -153,7 +154,7 @@ func (c *ControlMsgValidationInspector) Inspect(from peer.ID, rpc *pubsub.RPC) e control := rpc.GetControl() for _, ctrlMsgType := range p2pmsg.ControlMessageTypes() { lg := c.logger.With(). - Str("peer_id", from.String()). + Str("peer_id", p2plogging.PeerId(from)). Str("ctrl_msg_type", string(ctrlMsgType)).Logger() validationConfig, ok := c.config.GetCtrlMsgValidationConfig(ctrlMsgType) if !ok { @@ -188,7 +189,7 @@ func (c *ControlMsgValidationInspector) Inspect(from peer.ID, rpc *pubsub.RPC) e if err != nil { lg.Error(). Err(err). - Str("peer_id", from.String()). + Str("peer_id", p2plogging.PeerId(from)). Str("ctrl_msg_type", string(ctrlMsgType)). 
Msg("failed to get inspect message request") return fmt.Errorf("failed to get inspect message request: %w", err) @@ -213,7 +214,7 @@ func (c *ControlMsgValidationInspector) Inspect(from peer.ID, rpc *pubsub.RPC) e func (c *ControlMsgValidationInspector) inspectIWant(from peer.ID, iWants []*pubsub_pb.ControlIWant) error { lastHighest := c.rpcTracker.LastHighestIHaveRPCSize() lg := c.logger.With(). - Str("peer_id", from.String()). + Str("peer_id", p2plogging.PeerId(from)). Uint("max_sample_size", c.config.IWantRPCInspectionConfig.MaxSampleSize). Int64("last_highest_ihave_rpc_size", lastHighest). Logger() @@ -311,7 +312,7 @@ func (c *ControlMsgValidationInspector) blockingPreprocessingRpc(from peer.ID, v count := c.getCtrlMsgCount(validationConfig.ControlMsg, controlMessage) lg := c.logger.With(). Uint64("ctrl_msg_count", count). - Str("peer_id", from.String()). + Str("peer_id", p2plogging.PeerId(from)). Str("ctrl_msg_type", string(validationConfig.ControlMsg)).Logger() c.metrics.BlockingPreProcessingStarted(validationConfig.ControlMsg.String(), uint(count)) @@ -375,7 +376,7 @@ func (c *ControlMsgValidationInspector) blockingPreprocessingSampleRpc(from peer count := c.getCtrlMsgCount(validationConfig.ControlMsg, controlMessage) lg := c.logger.With(). Uint64("ctrl_msg_count", count). - Str("peer_id", from.String()). + Str("peer_id", p2plogging.PeerId(from)). Str("ctrl_msg_type", string(validationConfig.ControlMsg)).Logger() // if count greater than hard threshold perform synchronous topic validation on random subset of the iHave messages if count > validationConfig.HardThreshold { @@ -449,7 +450,7 @@ func (c *ControlMsgValidationInspector) processInspectMsgReq(req *InspectMsgRequ Error(). Err(err). Bool(logging.KeySuspicious, true). - Str("peer_id", req.Peer.String()). + Str("peer_id", p2plogging.PeerId(req.Peer)). Str("ctrl_msg_type", p2pmsg.CtrlMsgIWant.String()). Uint64("ctrl_msg_count", count). Msg("unexpected error encountered while performing iwant validation") @@ -458,7 +459,7 @@ func (c *ControlMsgValidationInspector) processInspectMsgReq(req *InspectMsgRequ } lg := c.logger.With(). - Str("peer_id", req.Peer.String()). + Str("peer_id", p2plogging.PeerId(req.Peer)). Str("ctrl_msg_type", string(req.validationConfig.ControlMsg)). Uint64("ctrl_msg_count", count).Logger() @@ -639,7 +640,7 @@ func (c *ControlMsgValidationInspector) validateTopic(from peer.ID, topic channe // errors are unexpected and irrecoverable indicating a bug. func (c *ControlMsgValidationInspector) validateClusterPrefixedTopic(from peer.ID, topic channels.Topic, activeClusterIds flow.ChainIDList) error { lg := c.logger.With(). - Str("from", from.String()). + Str("from", p2plogging.PeerId(from)). Logger() // reject messages from unstaked nodes for cluster prefixed topics nodeID, err := c.getFlowIdentifier(from) @@ -726,7 +727,7 @@ func (c *ControlMsgValidationInspector) logAndDistributeAsyncInspectErr(req *Ins lg := c.logger.With(). Bool(logging.KeySuspicious, true). Bool(logging.KeyNetworkingSecurity, true). - Str("peer_id", req.Peer.String()). + Str("peer_id", p2plogging.PeerId(req.Peer)). Str("ctrl_msg_type", string(req.validationConfig.ControlMsg)). 
Uint64("ctrl_msg_count", count).Logger() diff --git a/network/p2p/p2pbuilder/utils.go b/network/p2p/p2pbuilder/utils.go index 066ddfdbff5..ef2a2bc1ae9 100644 --- a/network/p2p/p2pbuilder/utils.go +++ b/network/p2p/p2pbuilder/utils.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) const keyResourceManagerLimit = "libp2p_resource_manager_limit" @@ -18,9 +19,9 @@ const keyResourceManagerLimit = "libp2p_resource_manager_limit" func notEjectedPeerFilter(idProvider module.IdentityProvider) p2p.PeerFilter { return func(p peer.ID) error { if id, found := idProvider.ByPeerID(p); !found { - return fmt.Errorf("failed to get identity of unknown peer with peer id %s", p.String()) + return fmt.Errorf("failed to get identity of unknown peer with peer id %s", p2plogging.PeerId(p)) } else if id.Ejected { - return fmt.Errorf("peer %s with node id %s is ejected", p.String(), id.NodeID.String()) + return fmt.Errorf("peer %s with node id %s is ejected", p2plogging.PeerId(p), id.NodeID.String()) } return nil @@ -111,7 +112,7 @@ func (l *limitConfigLogger) logProtocolLimits(p map[protocol.ID]rcmgr.ResourceLi func (l *limitConfigLogger) logPeerLimits(p map[peer.ID]rcmgr.ResourceLimits) { for pId, pLimits := range p { - lg := l.withBaseLimit(fmt.Sprintf("peer_%s", pId.String()), pLimits) + lg := l.withBaseLimit(fmt.Sprintf("peer_%s", p2plogging.PeerId(pId)), pLimits) lg.Info().Msg("peer limits set") } } diff --git a/network/p2p/p2pnet/network.go b/network/p2p/p2pnet/network.go index c971f6b56f4..3b280ecaeae 100644 --- a/network/p2p/p2pnet/network.go +++ b/network/p2p/p2pnet/network.go @@ -31,6 +31,7 @@ import ( "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/blob" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet/internal" "github.com/onflow/flow-go/network/p2p/ping" "github.com/onflow/flow-go/network/p2p/subscription" @@ -809,7 +810,7 @@ func DefaultValidators(log zerolog.Logger, flowID flow.Identifier) []network.Mes func (n *Network) isProtocolParticipant() p2p.PeerFilter { return func(p peer.ID) error { if _, ok := n.Identity(p); !ok { - return fmt.Errorf("failed to get identity of unknown peer with peer id %s", p.String()) + return fmt.Errorf("failed to get identity of unknown peer with peer id %s", p2plogging.PeerId(p)) } return nil } @@ -885,7 +886,7 @@ func (n *Network) authorizedPeers() peer.IDSlice { if err := filter(id); err != nil { n.logger.Debug(). Err(err). - Str("peer_id", id.String()). + Str("peer_id", p2plogging.PeerId(id)). 
Msg("filtering topology peer") peerAllowed = false @@ -990,7 +991,7 @@ func (n *Network) handleIncomingStream(s libp2pnet.Stream) { // ignore messages if node does not have subscription to topic if !n.libP2PNode.HasSubscription(topic) { violation := &network.Violation{ - Identity: nil, PeerID: remotePeer.String(), Channel: channel, Protocol: message.ProtocolTypeUnicast, + Identity: nil, PeerID: p2plogging.PeerId(remotePeer), Channel: channel, Protocol: message.ProtocolTypeUnicast, } msgCode, err := codec.MessageCodeFromPayload(msg.Payload) @@ -1114,14 +1115,14 @@ func (n *Network) processUnicastStreamMessage(remotePeer peer.ID, msg *message.M maxSize, err := UnicastMaxMsgSizeByCode(msg.Payload) if err != nil { n.slashingViolationsConsumer.OnUnknownMsgTypeError(&network.Violation{ - Identity: nil, PeerID: remotePeer.String(), MsgType: "", Channel: channel, Protocol: message.ProtocolTypeUnicast, Err: err, + Identity: nil, PeerID: p2plogging.PeerId(remotePeer), MsgType: "", Channel: channel, Protocol: message.ProtocolTypeUnicast, Err: err, }) return } if msg.Size() > maxSize { // message size exceeded n.logger.Error(). - Str("peer_id", remotePeer.String()). + Str("peer_id", p2plogging.PeerId(remotePeer)). Str("channel", msg.ChannelID). Int("max_size", maxSize). Int("size", msg.Size()). @@ -1137,7 +1138,7 @@ func (n *Network) processUnicastStreamMessage(remotePeer peer.ID, msg *message.M n.logger. Error(). Err(err). - Str("peer_id", remotePeer.String()). + Str("peer_id", p2plogging.PeerId(remotePeer)). Str("type", messageType). Str("channel", msg.ChannelID). Msg("unicast authorized sender validation failed") @@ -1156,7 +1157,7 @@ func (n *Network) processAuthenticatedMessage(msg *message.Message, peerID peer. // authenticated which means it must be known n.logger.Error(). Err(err). - Str("peer_id", peerID.String()). + Str("peer_id", p2plogging.PeerId(peerID)). Bool(logging.KeySuspicious, true). Msg("dropped message from unknown peer") return @@ -1168,14 +1169,14 @@ func (n *Network) processAuthenticatedMessage(msg *message.Message, peerID peer. case codec.IsErrUnknownMsgCode(err): // slash peer if message contains unknown message code byte violation := &network.Violation{ - PeerID: peerID.String(), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, + PeerID: p2plogging.PeerId(peerID), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, } n.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) return case codec.IsErrMsgUnmarshal(err) || codec.IsErrInvalidEncoding(err): // slash if peer sent a message that could not be marshalled into the message type denoted by the message code byte violation := &network.Violation{ - PeerID: peerID.String(), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, + PeerID: p2plogging.PeerId(peerID), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, } n.slashingViolationsConsumer.OnInvalidMsgError(violation) return @@ -1185,7 +1186,7 @@ func (n *Network) processAuthenticatedMessage(msg *message.Message, peerID peer. 
// collect slashing data because this could potentially lead to slashing err = fmt.Errorf("unexpected error during message validation: %w", err) violation := &network.Violation{ - PeerID: peerID.String(), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, + PeerID: p2plogging.PeerId(peerID), OriginID: originId, Channel: channel, Protocol: protocol, Err: err, } n.slashingViolationsConsumer.OnUnexpectedError(violation) return @@ -1195,7 +1196,7 @@ func (n *Network) processAuthenticatedMessage(msg *message.Message, peerID peer. if err != nil { n.logger.Error(). Err(err). - Str("peer_id", peerID.String()). + Str("peer_id", p2plogging.PeerId(peerID)). Str("origin_id", originId.String()). Msg("could not create incoming message scope") return diff --git a/network/p2p/p2pnode/gossipsubMetrics.go b/network/p2p/p2pnode/gossipsubMetrics.go index 37cf96f6a82..4a06b7e6e7a 100644 --- a/network/p2p/p2pnode/gossipsubMetrics.go +++ b/network/p2p/p2pnode/gossipsubMetrics.go @@ -7,6 +7,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // GossipSubControlMessageMetrics is a metrics and observability wrapper component for the incoming RPCs to a @@ -27,7 +28,7 @@ func NewGossipSubControlMessageMetrics(metrics module.GossipSubRouterMetrics, lo // ObserveRPC is invoked to record metrics on incoming RPC messages. func (o *GossipSubControlMessageMetrics) ObserveRPC(from peer.ID, rpc *pubsub.RPC) { - lg := o.logger.With().Str("peer_id", from.String()).Logger() + lg := o.logger.With().Str("peer_id", p2plogging.PeerId(from)).Logger() includedMessages := len(rpc.GetPublish()) ctl := rpc.GetControl() diff --git a/network/p2p/p2pnode/libp2pNode.go b/network/p2p/p2pnode/libp2pNode.go index 5f1580c9d02..074d76b45a6 100644 --- a/network/p2p/p2pnode/libp2pNode.go +++ b/network/p2p/p2pnode/libp2pNode.go @@ -25,6 +25,7 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/p2putils" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnode/internal" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/utils/logging" @@ -160,7 +161,7 @@ func (n *Node) RemovePeer(peerID peer.ID) error { // logging with suspicious level as we only expect to disconnect from a peer if it is not part of the // protocol state. n.logger.Warn(). - Str("peer_id", peerID.String()). + Str("peer_id", p2plogging.PeerId(peerID)). Bool(logging.KeySuspicious, true). Msg("disconnected from peer") @@ -218,7 +219,7 @@ func (n *Node) OpenProtectedStream(ctx context.Context, peerID peer.ID, protecti resetErr := s.Reset() if resetErr != nil { n.logger.Error(). - Str("target_peer_id", peerID.String()). + Str("target_peer_id", p2plogging.PeerId(peerID)). Err(resetErr). Msg("failed to reset stream") } @@ -245,7 +246,7 @@ func (n *Node) OpenProtectedStream(ctx context.Context, peerID peer.ID, protecti // - error: An error, if any occurred during the process. This includes failure in creating the stream. All returned // errors during this process can be considered benign. 
func (n *Node) createStream(ctx context.Context, peerID peer.ID) (libp2pnet.Stream, error) { - lg := n.logger.With().Str("peer_id", peerID.String()).Logger() + lg := n.logger.With().Str("peer_id", p2plogging.PeerId(peerID)).Logger() // If we do not currently have any addresses for the given peer, stream creation will almost // certainly fail. If this Node was configured with a routing system, we can try to use it to @@ -483,7 +484,7 @@ func (n *Node) WithPeersProvider(peersProvider p2p.PeersProvider) { causes, disallowListed := n.disallowListedCache.IsDisallowListed(peerId) if disallowListed { n.logger.Warn(). - Str("peer_id", peerId.String()). + Str("peer_id", p2plogging.PeerId(peerId)). Str("causes", fmt.Sprintf("%v", causes)). Msg("peer is disallowed for a cause, removing from authorized peers of peer manager") @@ -592,12 +593,12 @@ func (n *Node) OnDisallowListNotification(peerId peer.ID, cause flownet.Disallow causes, err := n.disallowListedCache.DisallowFor(peerId, cause) if err != nil { // returned error is fatal. - n.logger.Fatal().Err(err).Str("peer_id", peerId.String()).Msg("failed to add peer to disallow list") + n.logger.Fatal().Err(err).Str("peer_id", p2plogging.PeerId(peerId)).Msg("failed to add peer to disallow list") } // TODO: this code should further be refactored to also log the Flow id. n.logger.Warn(). - Str("peer_id", peerId.String()). + Str("peer_id", p2plogging.PeerId(peerId)). Str("notification_cause", cause.String()). Str("causes", fmt.Sprintf("%v", causes)). Msg("peer added to disallow list cache") @@ -618,7 +619,7 @@ func (n *Node) OnAllowListNotification(peerId peer.ID, cause flownet.DisallowLis remainingCauses := n.disallowListedCache.AllowFor(peerId, cause) n.logger.Info(). - Str("peer_id", peerId.String()). + Str("peer_id", p2plogging.PeerId(peerId)). Str("causes", fmt.Sprintf("%v", cause)). Str("remaining_causes", fmt.Sprintf("%v", remainingCauses)). 
Msg("peer is allow-listed for cause") diff --git a/network/p2p/p2pnode/libp2pNode_test.go b/network/p2p/p2pnode/libp2pNode_test.go index 3a42b2bbe24..ad42ec17108 100644 --- a/network/p2p/p2pnode/libp2pNode_test.go +++ b/network/p2p/p2pnode/libp2pNode_test.go @@ -25,6 +25,7 @@ import ( "github.com/onflow/flow-go/network/internal/p2pfixtures" "github.com/onflow/flow-go/network/internal/p2putils" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnode" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/unicast/protocols" @@ -174,7 +175,7 @@ func TestConnGater(t *testing.T) { idProvider, p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(pid peer.ID) error { if !node1Peers.Has(pid) { - return fmt.Errorf("peer id not found: %s", pid.String()) + return fmt.Errorf("peer id not found: %s", p2plogging.PeerId(pid)) } return nil }))) @@ -193,7 +194,7 @@ func TestConnGater(t *testing.T) { idProvider, p2ptest.WithConnectionGater(p2ptest.NewConnectionGater(idProvider, func(pid peer.ID) error { if !node2Peers.Has(pid) { - return fmt.Errorf("id not found: %s", pid.String()) + return fmt.Errorf("id not found: %s", p2plogging.PeerId(pid)) } return nil }))) diff --git a/network/p2p/p2pnode/protocolPeerCache.go b/network/p2p/p2pnode/protocolPeerCache.go index d45f855f80d..125d9aa3b37 100644 --- a/network/p2p/p2pnode/protocolPeerCache.go +++ b/network/p2p/p2pnode/protocolPeerCache.go @@ -10,6 +10,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" "github.com/rs/zerolog" + + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // ProtocolPeerCache store a mapping from protocol ID to peers who support that protocol @@ -95,7 +97,7 @@ func (p *ProtocolPeerCache) consumeSubscription(logger zerolog.Logger, h host.Ho case event.EvtPeerIdentificationCompleted: protocols, err := h.Peerstore().GetProtocols(evt.Peer) if err != nil { - logger.Err(err).Str("peer", evt.Peer.String()).Msg("failed to get protocols for peer") + logger.Err(err).Str("peer_id", p2plogging.PeerId(evt.Peer)).Msg("failed to get protocols for peer") continue } p.AddProtocols(evt.Peer, protocols) diff --git a/network/p2p/scoring/registry.go b/network/p2p/scoring/registry.go index 9009b86f41a..a1174e165b9 100644 --- a/network/p2p/scoring/registry.go +++ b/network/p2p/scoring/registry.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/network/p2p" netcache "github.com/onflow/flow-go/network/p2p/cache" p2pmsg "github.com/onflow/flow-go/network/p2p/message" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" ) @@ -138,13 +139,13 @@ func (r *GossipSubAppSpecificScoreRegistry) AppSpecificScoreFunc() func(peer.ID) return func(pid peer.ID) float64 { appSpecificScore := float64(0) - lg := r.logger.With().Str("peer_id", pid.String()).Logger() + lg := r.logger.With().Str("peer_id", p2plogging.PeerId(pid)).Logger() // (1) spam penalty: the penalty is applied to the application specific penalty when a peer conducts a spamming misbehaviour. spamRecord, err, spamRecordExists := r.spamScoreCache.Get(pid) if err != nil { // the error is considered fatal as it means the cache is not working properly. // we should not continue with the execution as it may lead to routing attack vulnerability. 
- r.logger.Fatal().Str("peer_id", pid.String()).Err(err).Msg("could not get application specific penalty for peer") + r.logger.Fatal().Str("peer_id", p2plogging.PeerId(pid)).Err(err).Msg("could not get application specific penalty for peer") return appSpecificScore // unreachable, but added to avoid proceeding with the execution if log level is changed. } @@ -189,7 +190,7 @@ func (r *GossipSubAppSpecificScoreRegistry) AppSpecificScoreFunc() func(peer.ID) } func (r *GossipSubAppSpecificScoreRegistry) stakingScore(pid peer.ID) (float64, flow.Identifier, flow.Role) { - lg := r.logger.With().Str("peer_id", pid.String()).Logger() + lg := r.logger.With().Str("peer_id", p2plogging.PeerId(pid)).Logger() // checks if peer has a valid Flow protocol identity. flowId, err := HasValidFlowIdentity(r.idProvider, pid) @@ -224,7 +225,7 @@ func (r *GossipSubAppSpecificScoreRegistry) subscriptionPenalty(pid peer.ID, flo // checks if peer has any subscription violation. if err := r.validator.CheckSubscribedToAllowedTopics(pid, role); err != nil { r.logger.Err(err). - Str("peer_id", pid.String()). + Str("peer_id", p2plogging.PeerId(pid)). Hex("flow_id", logging.ID(flowId)). Bool(logging.KeySuspicious, true). Msg("invalid subscription detected, penalizing peer") @@ -241,7 +242,7 @@ func (r *GossipSubAppSpecificScoreRegistry) OnInvalidControlMessageNotification( // we use mutex to ensure the method is concurrency safe. lg := r.logger.With(). - Str("peer_id", notification.PeerID.String()). + Str("peer_id", p2plogging.PeerId(notification.PeerID)). Str("misbehavior_type", notification.MsgType.String()).Logger() // try initializing the application specific penalty for the peer if it is not yet initialized. @@ -249,7 +250,7 @@ func (r *GossipSubAppSpecificScoreRegistry) OnInvalidControlMessageNotification( // initialization is successful only if the peer is not yet cached. 
initialized := r.spamScoreCache.Add(notification.PeerID, r.init()) if initialized { - lg.Trace().Str("peer_id", notification.PeerID.String()).Msg("application specific penalty initialized for peer") + lg.Trace().Str("peer_id", p2plogging.PeerId(notification.PeerID)).Msg("application specific penalty initialized for peer") } record, err := r.spamScoreCache.Update(notification.PeerID, func(record p2p.GossipSubSpamRecord) p2p.GossipSubSpamRecord { diff --git a/network/p2p/test/topic_validator_test.go b/network/p2p/test/topic_validator_test.go index 2aa1571aa13..21fd328cf1a 100644 --- a/network/p2p/test/topic_validator_test.go +++ b/network/p2p/test/topic_validator_test.go @@ -24,6 +24,7 @@ import ( "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/translator" "github.com/onflow/flow-go/network/p2p/utils" @@ -63,7 +64,7 @@ func TestTopicValidator_Unstaked(t *testing.T) { isStaked := func(pid peer.ID) error { fid, err := translatorFixture.GetFlowID(pid) if err != nil { - return fmt.Errorf("could not translate the peer_id %s to a Flow identifier: %w", pid.String(), err) + return fmt.Errorf("could not translate the peer_id %s to a Flow identifier: %w", p2plogging.PeerId(pid), err) } if _, ok := ids.ByNodeID(fid); !ok { @@ -341,7 +342,7 @@ func TestAuthorizedSenderValidator_Unauthorized(t *testing.T) { violation := &network.Violation{ Identity: &identity3, - PeerID: an1.ID().String(), + PeerID: p2plogging.PeerId(an1.ID()), OriginID: identity3.NodeID, MsgType: "*messages.BlockProposal", Channel: channel, diff --git a/network/p2p/tracer/gossipSubMeshTracer.go b/network/p2p/tracer/gossipSubMeshTracer.go index 07f2c430f83..cbd3d18d409 100644 --- a/network/p2p/tracer/gossipSubMeshTracer.go +++ b/network/p2p/tracer/gossipSubMeshTracer.go @@ -16,6 +16,7 @@ import ( "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/tracer/internal" "github.com/onflow/flow-go/utils/logging" ) @@ -131,7 +132,7 @@ func (t *GossipSubMeshTracer) Graft(p peer.ID, topic string) { t.topicMeshMu.Lock() defer t.topicMeshMu.Unlock() - lg := t.logger.With().Str("topic", topic).Str("peer_id", p.String()).Logger() + lg := t.logger.With().Str("topic", topic).Str("peer_id", p2plogging.PeerId(p)).Logger() if _, ok := t.topicMeshMap[topic]; !ok { t.topicMeshMap[topic] = make(map[peer.ID]struct{}) @@ -158,7 +159,7 @@ func (t *GossipSubMeshTracer) Prune(p peer.ID, topic string) { t.topicMeshMu.Lock() defer t.topicMeshMu.Unlock() - lg := t.logger.With().Str("topic", topic).Str("peer_id", p.String()).Logger() + lg := t.logger.With().Str("topic", topic).Str("peer_id", p2plogging.PeerId(p)).Logger() if _, ok := t.topicMeshMap[topic]; !ok { return @@ -240,11 +241,11 @@ func (t *GossipSubMeshTracer) logPeers() { if !exists { shouldWarn = true - topicPeers = topicPeers.Str(strconv.Itoa(peerIndex), fmt.Sprintf("pid=%s, flow_id=unknown, role=unknown", p.String())) + topicPeers = topicPeers.Str(strconv.Itoa(peerIndex), fmt.Sprintf("pid=%s, flow_id=unknown, role=unknown", p2plogging.PeerId(p))) continue } - topicPeers = topicPeers.Str(strconv.Itoa(peerIndex), fmt.Sprintf("pid=%s, flow_id=%x, role=%s", p.String(), id.NodeID, id.Role.String())) + topicPeers = 
topicPeers.Str(strconv.Itoa(peerIndex), fmt.Sprintf("pid=%s, flow_id=%x, role=%s", p2plogging.PeerId(p), id.NodeID, id.Role.String())) } lg := t.logger.With(). diff --git a/network/p2p/tracer/gossipSubScoreTracer.go b/network/p2p/tracer/gossipSubScoreTracer.go index facdc8bd182..b28189ec624 100644 --- a/network/p2p/tracer/gossipSubScoreTracer.go +++ b/network/p2p/tracer/gossipSubScoreTracer.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" ) @@ -225,7 +226,7 @@ func (g *GossipSubScoreTracer) logPeerScore(peerID peer.ID) bool { } lg = lg.With(). - Str("peer_id", peerID.String()). + Str("peer_id", p2plogging.PeerId(peerID)). Float64("overall_score", snapshot.Score). Float64("app_specific_score", snapshot.AppSpecificScore). Float64("ip_colocation_factor", snapshot.IPColocationFactor). diff --git a/network/p2p/translator/identity_provider_translator.go b/network/p2p/translator/identity_provider_translator.go index c2bee0170a3..8156f2e22a2 100644 --- a/network/p2p/translator/identity_provider_translator.go +++ b/network/p2p/translator/identity_provider_translator.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/keyutils" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // IdentityProviderIDTranslator implements an `p2p.IDTranslator` which provides ID @@ -31,7 +32,7 @@ func (t *IdentityProviderIDTranslator) GetFlowID(p peer.ID) (flow.Identifier, er } ids := t.idProvider.Identities(filter.HasNetworkingKey(flowKey)) if len(ids) == 0 { - return flow.ZeroID, fmt.Errorf("could not find identity corresponding to peer id %v", p.String()) + return flow.ZeroID, fmt.Errorf("could not find identity corresponding to peer id %v", p2plogging.PeerId(p)) } return ids[0].NodeID, nil } diff --git a/network/p2p/unicast/errors.go b/network/p2p/unicast/errors.go index 85690508e91..d8abb2624f7 100644 --- a/network/p2p/unicast/errors.go +++ b/network/p2p/unicast/errors.go @@ -5,6 +5,8 @@ import ( "fmt" "github.com/libp2p/go-libp2p/core/peer" + + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // ErrDialInProgress indicates that the libp2p node is currently dialing the peer. @@ -13,7 +15,7 @@ type ErrDialInProgress struct { } func (e ErrDialInProgress) Error() string { - return fmt.Sprintf("dialing to peer %s already in progress", e.pid.String()) + return fmt.Sprintf("dialing to peer %s already in progress", p2plogging.PeerId(e.pid)) } // NewDialInProgressErr returns a new ErrDialInProgress. diff --git a/network/p2p/unicast/manager.go b/network/p2p/unicast/manager.go index 43365ebecca..a03f5f1de2b 100644 --- a/network/p2p/unicast/manager.go +++ b/network/p2p/unicast/manager.go @@ -17,6 +17,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/unicast/stream" ) @@ -148,7 +149,7 @@ func (m *Manager) tryCreateStream(ctx context.Context, peerID peer.ID, maxAttemp if IsErrDialInProgress(err) { m.logger.Warn(). Err(err). - Str("peer_id", peerID.String()). + Str("peer_id", p2plogging.PeerId(peerID)). Int("attempt", attempts). Uint64("max_attempts", maxAttempts). 
Msg("retrying create stream, dial to peer in progress") @@ -270,7 +271,7 @@ func (m *Manager) dialPeer(ctx context.Context, peerID peer.ID, maxAttempts uint } m.logger.Warn(). Err(err). - Str("peer_id", peerID.String()). + Str("peer_id", p2plogging.PeerId(peerID)). Int("attempt", dialAttempts). Uint64("max_attempts", maxAttempts). Msg("retrying peer dialing") diff --git a/network/p2p/unicast/stream/errors.go b/network/p2p/unicast/stream/errors.go index dc3f5250edd..9c73294c52b 100644 --- a/network/p2p/unicast/stream/errors.go +++ b/network/p2p/unicast/stream/errors.go @@ -6,6 +6,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // ErrSecurityProtocolNegotiationFailed indicates security protocol negotiation failed during the stream factory connect attempt. @@ -15,7 +17,7 @@ type ErrSecurityProtocolNegotiationFailed struct { } func (e ErrSecurityProtocolNegotiationFailed) Error() string { - return fmt.Errorf("failed to dial remote peer %s in stream factory invalid node ID: %w", e.pid.String(), e.err).Error() + return fmt.Errorf("failed to dial remote peer %s in stream factory invalid node ID: %w", p2plogging.PeerId(e.pid), e.err).Error() } // NewSecurityProtocolNegotiationErr returns a new ErrSecurityProtocolNegotiationFailed. @@ -37,7 +39,7 @@ type ErrProtocolNotSupported struct { } func (e ErrProtocolNotSupported) Error() string { - return fmt.Errorf("failed to dial remote peer %s remote node is running on a different spork: %w, protocol attempted: %s", e.peerID.String(), e.err, e.protocolIDS).Error() + return fmt.Errorf("failed to dial remote peer %s remote node is running on a different spork: %w, protocol attempted: %s", p2plogging.PeerId(e.peerID), e.err, e.protocolIDS).Error() } // NewProtocolNotSupportedErr returns a new ErrSecurityProtocolNegotiationFailed. 
diff --git a/network/test/unicast_authorization_test.go b/network/test/unicast_authorization_test.go index e39c2ae630c..b9cec9ba8b0 100644 --- a/network/test/unicast_authorization_test.go +++ b/network/test/unicast_authorization_test.go @@ -23,6 +23,7 @@ import ( "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet" "github.com/onflow/flow-go/network/validator" "github.com/onflow/flow-go/utils/unittest" @@ -130,7 +131,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnstakedPeer() var nilID *flow.Identity expectedViolation := &network.Violation{ Identity: nilID, // because the peer will be unverified this identity will be nil - PeerID: expectedSenderPeerID.String(), + PeerID: p2plogging.PeerId(expectedSenderPeerID), MsgType: "", // message will not be decoded before OnSenderEjectedError is logged, we won't log message type Channel: channels.TestNetworkChannel, // message will not be decoded before OnSenderEjectedError is logged, we won't log peer ID Protocol: message.ProtocolTypeUnicast, @@ -181,7 +182,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_EjectedPeer() { expectedViolation := &network.Violation{ Identity: u.senderID, // we expect this method to be called with the ejected identity OriginID: u.senderID.NodeID, - PeerID: expectedSenderPeerID.String(), + PeerID: p2plogging.PeerId(expectedSenderPeerID), MsgType: "", // message will not be decoded before OnSenderEjectedError is logged, we won't log message type Channel: channels.TestNetworkChannel, // message will not be decoded before OnSenderEjectedError is logged, we won't log peer ID Protocol: message.ProtocolTypeUnicast, @@ -221,7 +222,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnauthorizedPee expectedViolation := &network.Violation{ Identity: u.senderID, OriginID: u.senderID.NodeID, - PeerID: expectedSenderPeerID.String(), + PeerID: p2plogging.PeerId(expectedSenderPeerID), MsgType: "*message.TestMessage", Channel: channels.ConsensusCommittee, Protocol: message.ProtocolTypeUnicast, @@ -276,7 +277,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnknownMsgCode( var nilID *flow.Identity expectedViolation := &network.Violation{ Identity: nilID, - PeerID: expectedSenderPeerID.String(), + PeerID: p2plogging.PeerId(expectedSenderPeerID), MsgType: "", Channel: channels.TestNetworkChannel, Protocol: message.ProtocolTypeUnicast, @@ -324,7 +325,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_WrongMsgCode() expectedViolation := &network.Violation{ Identity: u.senderID, OriginID: u.senderID.NodeID, - PeerID: expectedSenderPeerID.String(), + PeerID: p2plogging.PeerId(expectedSenderPeerID), MsgType: "*messages.DKGMessage", Channel: channels.TestNetworkChannel, Protocol: message.ProtocolTypeUnicast, @@ -400,7 +401,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_UnauthorizedUni expectedViolation := &network.Violation{ Identity: u.senderID, OriginID: u.senderID.NodeID, - PeerID: expectedSenderPeerID.String(), + PeerID: p2plogging.PeerId(expectedSenderPeerID), MsgType: "*messages.BlockProposal", Channel: channels.ConsensusCommittee, Protocol: message.ProtocolTypeUnicast, @@ -441,7 +442,7 @@ func (u *UnicastAuthorizationTestSuite) TestUnicastAuthorization_ReceiverHasNoSu expectedViolation := &network.Violation{ Identity: nil, - PeerID: 
expectedSenderPeerID.String(), + PeerID: p2plogging.PeerId(expectedSenderPeerID), MsgType: "*message.TestMessage", Channel: channels.TestNetworkChannel, Protocol: message.ProtocolTypeUnicast, diff --git a/network/validator/authorized_sender_validator.go b/network/validator/authorized_sender_validator.go index 6841d69a9e6..69d925661a1 100644 --- a/network/validator/authorized_sender_validator.go +++ b/network/validator/authorized_sender_validator.go @@ -13,6 +13,7 @@ import ( "github.com/onflow/flow-go/network/codec" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) var ( @@ -61,14 +62,14 @@ func (av *AuthorizedSenderValidator) Validate(from peer.ID, payload []byte, chan // something terrible went wrong. identity, ok := av.getIdentity(from) if !ok { - violation := &network.Violation{PeerID: from.String(), Channel: channel, Protocol: protocol, Err: ErrIdentityUnverified} + violation := &network.Violation{PeerID: p2plogging.PeerId(from), Channel: channel, Protocol: protocol, Err: ErrIdentityUnverified} av.slashingViolationsConsumer.OnUnAuthorizedSenderError(violation) return "", ErrIdentityUnverified } msgCode, err := codec.MessageCodeFromPayload(payload) if err != nil { - violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) return "", err } @@ -78,23 +79,23 @@ func (av *AuthorizedSenderValidator) Validate(from peer.ID, payload []byte, chan case err == nil: return msgType, nil case message.IsUnknownMsgTypeErr(err) || codec.IsErrUnknownMsgCode(err): - violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnknownMsgTypeError(violation) return msgType, err case errors.Is(err, message.ErrUnauthorizedMessageOnChannel) || errors.Is(err, message.ErrUnauthorizedRole): - violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnAuthorizedSenderError(violation) return msgType, err case errors.Is(err, ErrSenderEjected): - violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnSenderEjectedError(violation) return msgType, err case errors.Is(err, message.ErrUnauthorizedUnicastOnChannel): - violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, 
Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnauthorizedUnicastOnChannel(violation) return msgType, err case errors.Is(err, message.ErrUnauthorizedPublishOnChannel): - violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnauthorizedPublishOnChannel(violation) return msgType, err default: @@ -102,7 +103,7 @@ func (av *AuthorizedSenderValidator) Validate(from peer.ID, payload []byte, chan // don't crash as a result of external inputs since that creates a DoS vector // collect slashing data because this could potentially lead to slashing err = fmt.Errorf("unexpected error during message validation: %w", err) - violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: from.String(), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} + violation := &network.Violation{OriginID: identity.NodeID, Identity: identity, PeerID: p2plogging.PeerId(from), MsgType: msgType, Channel: channel, Protocol: protocol, Err: err} av.slashingViolationsConsumer.OnUnexpectedError(violation) return msgType, err } diff --git a/network/validator/pubsub/topic_validator.go b/network/validator/pubsub/topic_validator.go index 954c5f6b401..078f9272b12 100644 --- a/network/validator/pubsub/topic_validator.go +++ b/network/validator/pubsub/topic_validator.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/validator" _ "github.com/onflow/flow-go/utils/binstat" "github.com/onflow/flow-go/utils/logging" @@ -85,7 +86,7 @@ func TopicValidator(log zerolog.Logger, peerFilter func(peer.ID) error, validato } lg := log.With(). - Str("peer_id", from.String()). + Str("peer_id", p2plogging.PeerId(from)). Str("topic", rawMsg.GetTopic()). Int("raw_msg_size", len(rawMsg.Data)). Int("msg_size", msg.Size()). From abc2e475f86568f4872e9dfe080b73923c35c27b Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 13:32:15 -0700 Subject: [PATCH 16/25] skips benchmarking --- network/p2p/p2plogging/logging_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/network/p2p/p2plogging/logging_test.go b/network/p2p/p2plogging/logging_test.go index 341bf476d0d..3717f226631 100644 --- a/network/p2p/p2plogging/logging_test.go +++ b/network/p2p/p2plogging/logging_test.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/utils/unittest" ) // TestPeerIdLogging checks the end-to-end functionality of the PeerId logger helper. @@ -20,6 +21,8 @@ func TestPeerIdLogging(t *testing.T) { // BenchmarkPeerIdString benchmarks the peer.ID.String() method. 
func BenchmarkPeerIdString(b *testing.B) { + unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id string benchmarking, set environment variable to enable") + count := 100 pids := make([]peer.ID, 0, count) for i := 0; i < count; i++ { @@ -35,6 +38,8 @@ func BenchmarkPeerIdString(b *testing.B) { // BenchmarkPeerIdLogging benchmarks the PeerId logger helper, which is expected to be faster than the peer.ID.String() method, // as it caches the base58 encoded peer ID strings. func BenchmarkPeerIdLogging(b *testing.B) { + unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id logging benchmarking, set environment variable to enable") + count := 100 pids := make([]peer.ID, 0, count) for i := 0; i < count; i++ { From 92b49339b6734c6162e4ba46c60aa450eb3afffc Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 15:15:46 -0700 Subject: [PATCH 17/25] wip fixing import cycles --- .../node_builder/access_node_builder.go | 9 +- cmd/collection/main.go | 3 +- cmd/execution_builder.go | 3 +- cmd/observer/node_builder/observer_builder.go | 11 +- cmd/scaffold.go | 9 +- cmd/verification_builder.go | 3 +- engine/access/state_stream/backend.go | 7 +- .../state_stream/backend_executiondata.go | 5 +- .../backend_executiondata_test.go | 11 +- engine/access/state_stream/engine.go | 3 +- engine/access/state_stream/mock/api.go | 2 +- .../mock/get_execution_data_func.go | 3 +- engine/common/rpc/convert/execution_data.go | 16 +- .../common/rpc/convert/execution_data_test.go | 8 +- engine/execution/block_result.go | 10 +- .../computation/computer/result_collector.go | 4 +- .../retryable_uploader_wrapper_test.go | 10 +- engine/execution/state/unittest/fixtures.go | 3 +- engine/execution/testutil/fixtures.go | 6 +- follower/follower_builder.go | 11 +- insecure/cmd/corrupted_builder.go | 4 +- insecure/corruptlibp2p/pubsub_adapter.go | 2 +- module/chunks/chunkVerifier.go | 3 +- module/chunks/chunkVerifier_test.go | 5 +- .../execution_data/cache/cache.go | 11 +- .../execution_data/downloader.go | 13 +- .../execution_data/downloader_test.go | 3 +- .../execution_data/mock/downloader.go | 2 +- .../mock/execution_data_store.go | 2 +- .../execution_data/{ => model}/entity.go | 2 +- .../{ => model}/execution_data.go | 2 +- .../execution_data/serializer.go | 5 +- .../executiondatasync/execution_data/store.go | 21 +- .../execution_data/store_test.go | 25 +- .../executiondatasync/execution_data/util.go | 3 +- module/executiondatasync/provider/provider.go | 15 +- .../provider/provider_test.go | 15 +- module/mempool/execution_data.go | 8 +- module/mempool/herocache/backdata/cache.go | 7 + module/mempool/herocache/execution_data.go | 14 +- .../mempool/herocache/execution_data_test.go | 6 +- module/mempool/mock/execution_data.go | 2 +- module/metrics/access.go | 37 +-- module/metrics/alsp.go | 5 +- module/metrics/badger.go | 26 +- module/metrics/bitswap.go | 38 +-- module/metrics/cache.go | 17 +- module/metrics/chainsync.go | 25 +- module/metrics/cleaner.go | 6 +- module/metrics/collection.go | 15 +- module/metrics/compliance.go | 65 ++-- module/metrics/consensus.go | 17 +- module/metrics/cruisectl.go | 22 +- module/metrics/engine.go | 21 +- module/metrics/execution.go | 281 +++++++++--------- module/metrics/execution_data_requester.go | 29 +- module/metrics/execution_data_sync.go | 66 ++-- module/metrics/gossipsub.go | 37 +-- .../gossipsub_rpc_validation_inspector.go | 17 +- module/metrics/gossipsub_score.go | 37 +-- module/metrics/herocache.go | 131 +------- 
module/metrics/hotstuff.go | 73 ++--- module/metrics/internal/namespaces.go | 111 +++++++ module/metrics/libp2p_resource_manager.go | 59 ++-- module/metrics/loader.go | 12 +- module/metrics/mempool.go | 5 +- module/metrics/namespaces.go | 111 ------- module/metrics/network.go | 79 ++--- module/metrics/network/herocache.go | 131 ++++++++ module/metrics/observer.go | 6 +- module/metrics/ping.go | 13 +- module/metrics/rate_limited_blockstore.go | 5 +- module/metrics/rest_api.go | 17 +- module/metrics/transaction.go | 41 +-- module/metrics/unicast_manager.go | 25 +- module/metrics/verification.go | 69 ++--- .../execution_data_requester.go | 4 +- .../requester/distributer.go | 4 +- .../requester/execution_data_requester.go | 3 +- .../execution_data_requester_test.go | 29 +- .../requester/jobs/execution_data_reader.go | 4 +- .../jobs/execution_data_reader_test.go | 10 +- network/alsp/manager/manager.go | 5 +- network/errors.go | 2 +- network/internal/p2putils/utils.go | 2 +- network/p2p/blob/blob_service.go | 2 +- network/p2p/cache/gossipsub_spam_records.go | 2 +- network/p2p/connection/connection_gater.go | 2 +- .../p2p/connection/connection_gater_test.go | 2 +- network/p2p/connection/connector.go | 2 +- .../p2p/connection/internal/loggerNotifiee.go | 2 +- network/p2p/connection/peerManager.go | 2 +- network/p2p/dht/dht.go | 2 +- .../control_message_validation_inspector.go | 2 +- .../p2pbuilder/gossipsub/gossipSubBuilder.go | 9 +- network/p2p/p2pbuilder/utils.go | 2 +- network/p2p/p2pnet/network.go | 2 +- network/p2p/p2pnode/gossipsubMetrics.go | 2 +- network/p2p/p2pnode/libp2pNode.go | 2 +- network/p2p/p2pnode/libp2pNode_test.go | 2 +- network/p2p/p2pnode/protocolPeerCache.go | 2 +- network/p2p/scoring/registry.go | 2 +- network/p2p/test/topic_validator_test.go | 2 +- network/p2p/tracer/gossipSubMeshTracer.go | 7 +- network/p2p/tracer/gossipSubScoreTracer.go | 2 +- .../identity_provider_translator.go | 2 +- network/p2p/unicast/errors.go | 2 +- network/p2p/unicast/manager.go | 2 +- network/p2p/unicast/stream/errors.go | 2 +- network/test/unicast_authorization_test.go | 2 +- .../validator/authorized_sender_validator.go | 2 +- network/validator/pubsub/topic_validator.go | 2 +- .../p2plogging/internal/peerIdCache.go | 8 +- .../p2plogging/internal/peerIdCache_test.go | 11 +- {network/p2p => utils}/p2plogging/logging.go | 2 +- .../p2p => utils}/p2plogging/logging_test.go | 2 +- utils/unittest/fixtures.go | 35 +-- 117 files changed, 1112 insertions(+), 1031 deletions(-) rename module/executiondatasync/execution_data/{ => model}/entity.go (97%) rename module/executiondatasync/execution_data/{ => model}/execution_data.go (97%) create mode 100644 module/metrics/internal/namespaces.go delete mode 100644 module/metrics/namespaces.go create mode 100644 module/metrics/network/herocache.go rename {network/p2p => utils}/p2plogging/internal/peerIdCache.go (90%) rename {network/p2p => utils}/p2plogging/internal/peerIdCache_test.go (90%) rename {network/p2p => utils}/p2plogging/logging.go (92%) rename {network/p2p => utils}/p2plogging/logging_test.go (96%) diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index d5b0e688cd4..9591eb1c99b 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -58,6 +58,7 @@ import ( "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" + 
"github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/module/metrics/unstaked" "github.com/onflow/flow-go/module/state_synchronization" edrequester "github.com/onflow/flow-go/module/state_synchronization/requester" @@ -367,7 +368,7 @@ func (builder *FlowAccessNodeBuilder) buildFollowerEngine() *FlowAccessNodeBuild builder.Component("follower engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) } core, err := followereng.NewComplianceCore( @@ -1252,9 +1253,9 @@ func (builder *FlowAccessNodeBuilder) enqueuePublicNetworkInit() { msgValidators := publicNetworkMsgValidators(node.Logger.With().Bool("public", true).Logger(), node.IdentityProvider, builder.NodeID) receiveCache := netcache.NewHeroReceiveCache(builder.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, builder.Logger, - metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) + networkmetrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) - err := node.Metrics.Mempool.Register(metrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) + err := node.Metrics.Mempool.Register(networkmetrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) if err != nil { return nil, fmt.Errorf("could not register networking receive cache metric: %w", err) } @@ -1359,7 +1360,7 @@ func (builder *FlowAccessNodeBuilder) initPublicLibp2pNode(networkKey crypto.Pri }, &p2p.DisallowListCacheConfig{ MaxSize: builder.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, - Metrics: metrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), + Metrics: networkmetrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), }, meshTracer). SetBasicResolver(builder.Resolver). 
diff --git a/cmd/collection/main.go b/cmd/collection/main.go index f285911bfdd..2368a43cbf1 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -46,6 +46,7 @@ import ( "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" @@ -318,7 +319,7 @@ func main() { var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) } core, err := followereng.NewComplianceCore( diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index 8aa1e46884a..05542dd8546 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -74,6 +74,7 @@ import ( finalizer "github.com/onflow/flow-go/module/finalizer/consensus" "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p/blob" @@ -978,7 +979,7 @@ func (exeNode *ExecutionNode) LoadFollowerEngine( var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) } core, err := followereng.NewComplianceCore( diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index ff6e5ff6da5..44e33b4a2bb 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -47,6 +47,7 @@ import ( "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/local" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/network" consensus_follower "github.com/onflow/flow-go/module/upstream" "github.com/onflow/flow-go/network" alspmgr "github.com/onflow/flow-go/network/alsp/manager" @@ -60,7 +61,6 @@ import ( "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/p2p/p2pbuilder" p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet" "github.com/onflow/flow-go/network/p2p/subscription" "github.com/onflow/flow-go/network/p2p/tracer" @@ -75,6 +75,7 @@ import ( "github.com/onflow/flow-go/state/protocol/events/gadgets" "github.com/onflow/flow-go/utils/grpcutils" "github.com/onflow/flow-go/utils/io" + "github.com/onflow/flow-go/utils/p2plogging" ) // ObserverBuilder extends cmd.NodeBuilder and declares additional functions needed to bootstrap an Access node @@ -348,7 +349,7 @@ func (builder *ObserverServiceBuilder) buildFollowerEngine() *ObserverServiceBui builder.Component("follower engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = 
networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) } packer := hotsignature.NewConsensusSigDataPacker(builder.Committee) verifier := verification.NewCombinedVerifier(builder.Committee, packer) // verifier for HotStuff signature constructs (QCs, TCs, votes) @@ -718,7 +719,7 @@ func (builder *ObserverServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr p2pconfig.PeerManagerDisableConfig(), // disable peer manager for observer node. &p2p.DisallowListCacheConfig{ MaxSize: builder.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, - Metrics: metrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), + Metrics: networkmetrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), }, meshTracer). SetSubscriptionFilter( @@ -794,9 +795,9 @@ func (builder *ObserverServiceBuilder) enqueuePublicNetworkInit() { Component("public network", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { receiveCache := netcache.NewHeroReceiveCache(builder.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, builder.Logger, - metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) + networkmetrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) - err := node.Metrics.Mempool.Register(metrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) + err := node.Metrics.Mempool.Register(networkmetrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) if err != nil { return nil, fmt.Errorf("could not register networking receive cache metric: %w", err) } diff --git a/cmd/scaffold.go b/cmd/scaffold.go index ebfba2614c1..99c1a627baa 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -39,6 +39,7 @@ import ( "github.com/onflow/flow-go/module/local" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/module/profiler" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/module/updatable_configs" @@ -244,8 +245,8 @@ func (fnb *FlowNodeBuilder) EnqueueResolver() { var dnsIpCacheMetricsCollector module.HeroCacheMetrics = metrics.NewNoopCollector() var dnsTxtCacheMetricsCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if fnb.HeroCacheMetricsEnable { - dnsIpCacheMetricsCollector = metrics.NetworkDnsIpCacheMetricsFactory(fnb.MetricsRegisterer) - dnsTxtCacheMetricsCollector = metrics.NetworkDnsTxtCacheMetricsFactory(fnb.MetricsRegisterer) + dnsIpCacheMetricsCollector = networkmetrics.NetworkDnsIpCacheMetricsFactory(fnb.MetricsRegisterer) + dnsTxtCacheMetricsCollector = networkmetrics.NetworkDnsTxtCacheMetricsFactory(fnb.MetricsRegisterer) } cache := herocache.NewDNSCache( @@ -365,7 +366,7 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() { &fnb.FlowConfig.NetworkConfig.ConnectionManagerConfig, &p2p.DisallowListCacheConfig{ MaxSize: fnb.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, - Metrics: metrics.DisallowListCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork), + Metrics: networkmetrics.DisallowListCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork), }) if err != nil { @@ -437,7 +438,7 @@ func (fnb *FlowNodeBuilder) InitFlowNetworkWithConduitFactory( receiveCache := netcache.NewHeroReceiveCache(fnb.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, fnb.Logger, 
- metrics.NetworkReceiveCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork)) + networkmetrics.NetworkReceiveCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork)) err := node.Metrics.Mempool.Register(metrics.ResourceNetworkingReceiveCache, receiveCache.Size) if err != nil { diff --git a/cmd/verification_builder.go b/cmd/verification_builder.go index ea5ddf65a8e..af9b265678c 100644 --- a/cmd/verification_builder.go +++ b/cmd/verification_builder.go @@ -33,6 +33,7 @@ import ( "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" @@ -359,7 +360,7 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) } core, err := followereng.NewComplianceCore( diff --git a/engine/access/state_stream/backend.go b/engine/access/state_stream/backend.go index 33c5e18cb77..324da01400b 100644 --- a/engine/access/state_stream/backend.go +++ b/engine/access/state_stream/backend.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" ) @@ -35,11 +36,11 @@ const ( DefaultResponseLimit = float64(0) ) -type GetExecutionDataFunc func(context.Context, uint64) (*execution_data.BlockExecutionDataEntity, error) +type GetExecutionDataFunc func(context.Context, uint64) (*model.BlockExecutionDataEntity, error) type GetStartHeightFunc func(flow.Identifier, uint64) (uint64, error) type API interface { - GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionData, error) + GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*model.BlockExecutionData, error) SubscribeExecutionData(ctx context.Context, startBlockID flow.Identifier, startBlockHeight uint64) Subscription SubscribeEvents(ctx context.Context, startBlockID flow.Identifier, startHeight uint64, filter EventFilter) Subscription } @@ -126,7 +127,7 @@ func New( // getExecutionData returns the execution data for the given block height. // Expected errors during normal operation: // - storage.ErrNotFound or execution_data.BlobNotFoundError: execution data for the given block height is not available. -func (b *StateStreamBackend) getExecutionData(ctx context.Context, height uint64) (*execution_data.BlockExecutionDataEntity, error) { +func (b *StateStreamBackend) getExecutionData(ctx context.Context, height uint64) (*model.BlockExecutionDataEntity, error) { // fail early if no notification has been received for the given block height. // note: it's possible for the data to exist in the data store before the notification is // received. this ensures a consistent view is available to all streams. 
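The doc comment on `getExecutionData` above pins down its error contract: `storage.ErrNotFound` and `execution_data.BlobNotFoundError` are the benign "not yet available" sentinels, and anything else is an unexpected failure. A caller sketch under that contract follows; the helper itself is hypothetical, and `execution_data.IsBlobNotFoundError` is assumed to be the matching predicate for the sentinel named in the comment:

```go
package state_stream

import (
	"context"
	"errors"
	"fmt"

	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
	"github.com/onflow/flow-go/storage"
)

// executionDataIfAvailable distinguishes the documented benign sentinels
// (data for the height is not in the store yet; the caller may retry on the
// next notification) from unexpected failures, which are propagated.
func executionDataIfAvailable(ctx context.Context, get GetExecutionDataFunc, height uint64) (*model.BlockExecutionDataEntity, bool, error) {
	execData, err := get(ctx, height)
	switch {
	case errors.Is(err, storage.ErrNotFound) || execution_data.IsBlobNotFoundError(err):
		return nil, false, nil
	case err != nil:
		return nil, false, fmt.Errorf("unexpected failure getting execution data at height %d: %w", height, err)
	}
	return execData, true, nil
}
```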
diff --git a/engine/access/state_stream/backend_executiondata.go b/engine/access/state_stream/backend_executiondata.go index 0443c6ba9ba..40e48be4003 100644 --- a/engine/access/state_stream/backend_executiondata.go +++ b/engine/access/state_stream/backend_executiondata.go @@ -14,12 +14,13 @@ import ( "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/storage" ) type ExecutionDataResponse struct { Height uint64 - ExecutionData *execution_data.BlockExecutionData + ExecutionData *model.BlockExecutionData } type ExecutionDataBackend struct { @@ -34,7 +35,7 @@ type ExecutionDataBackend struct { getStartHeight GetStartHeightFunc } -func (b *ExecutionDataBackend) GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionData, error) { +func (b *ExecutionDataBackend) GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*model.BlockExecutionData, error) { header, err := b.headers.ByBlockID(blockID) if err != nil { return nil, fmt.Errorf("could not get block header for %s: %w", blockID, err) diff --git a/engine/access/state_stream/backend_executiondata_test.go b/engine/access/state_stream/backend_executiondata_test.go index 361cb64aa80..524a304a942 100644 --- a/engine/access/state_stream/backend_executiondata_test.go +++ b/engine/access/state_stream/backend_executiondata_test.go @@ -20,6 +20,7 @@ import ( "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" protocolmock "github.com/onflow/flow-go/state/protocol/mock" @@ -53,7 +54,7 @@ type BackendExecutionDataSuite struct { blocks []*flow.Block blockEvents map[flow.Identifier]flow.EventsList - execDataMap map[flow.Identifier]*execution_data.BlockExecutionDataEntity + execDataMap map[flow.Identifier]*model.BlockExecutionDataEntity blockMap map[uint64]*flow.Block sealMap map[flow.Identifier]*flow.Seal resultMap map[flow.Identifier]*flow.ExecutionResult @@ -89,7 +90,7 @@ func (s *BackendExecutionDataSuite) SetupTest() { var err error blockCount := 5 - s.execDataMap = make(map[flow.Identifier]*execution_data.BlockExecutionDataEntity, blockCount) + s.execDataMap = make(map[flow.Identifier]*model.BlockExecutionDataEntity, blockCount) s.blockEvents = make(map[flow.Identifier]flow.EventsList, blockCount) s.blockMap = make(map[uint64]*flow.Block, blockCount) s.sealMap = make(map[flow.Identifier]*flow.Seal, blockCount) @@ -113,7 +114,7 @@ func (s *BackendExecutionDataSuite) SetupTest() { blockEvents := unittest.BlockEventsFixture(block.Header, (i%len(testEventTypes))*3+1, testEventTypes...) 
numChunks := 5 - chunkDatas := make([]*execution_data.ChunkExecutionData, 0, numChunks) + chunkDatas := make([]*model.ChunkExecutionData, 0, numChunks) for i := 0; i < numChunks; i++ { var events flow.EventsList switch { @@ -124,7 +125,7 @@ func (s *BackendExecutionDataSuite) SetupTest() { default: events = flow.EventsList{blockEvents.Events[i]} } - chunkDatas = append(chunkDatas, unittest.ChunkExecutionDataFixture(s.T(), execution_data.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events))) + chunkDatas = append(chunkDatas, unittest.ChunkExecutionDataFixture(s.T(), model.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events))) } execData := unittest.BlockExecutionDataFixture( unittest.WithBlockExecutionDataBlockID(block.ID()), @@ -135,7 +136,7 @@ func (s *BackendExecutionDataSuite) SetupTest() { assert.NoError(s.T(), err) s.blocks = append(s.blocks, block) - s.execDataMap[block.ID()] = execution_data.NewBlockExecutionDataEntity(result.ExecutionDataID, execData) + s.execDataMap[block.ID()] = model.NewBlockExecutionDataEntity(result.ExecutionDataID, execData) s.blockEvents[block.ID()] = blockEvents.Events s.blockMap[block.Header.Height] = block s.sealMap[block.ID()] = seal diff --git a/engine/access/state_stream/engine.go b/engine/access/state_stream/engine.go index cb3a3e73813..64d67e2f65c 100644 --- a/engine/access/state_stream/engine.go +++ b/engine/access/state_stream/engine.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/protocol" @@ -130,7 +131,7 @@ func NewEng( // The caller must guarantee that execution data is locally available for all blocks with // heights between the initialBlockHeight provided during startup and the block height of // the execution data provided. 
-func (e *Engine) OnExecutionData(executionData *execution_data.BlockExecutionDataEntity) { +func (e *Engine) OnExecutionData(executionData *model.BlockExecutionDataEntity) { lg := e.log.With().Hex("block_id", logging.ID(executionData.BlockID)).Logger() lg.Trace().Msg("received execution data") diff --git a/engine/access/state_stream/mock/api.go b/engine/access/state_stream/mock/api.go index 5b57efc917f..c4779c1f24c 100644 --- a/engine/access/state_stream/mock/api.go +++ b/engine/access/state_stream/mock/api.go @@ -6,7 +6,7 @@ import ( context "context" flow "github.com/onflow/flow-go/model/flow" - execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" mock "github.com/stretchr/testify/mock" diff --git a/engine/access/state_stream/mock/get_execution_data_func.go b/engine/access/state_stream/mock/get_execution_data_func.go index 50fe8087e21..ee872a740a9 100644 --- a/engine/access/state_stream/mock/get_execution_data_func.go +++ b/engine/access/state_stream/mock/get_execution_data_func.go @@ -5,7 +5,8 @@ package mock import ( context "context" - execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" + mock "github.com/stretchr/testify/mock" ) diff --git a/engine/common/rpc/convert/execution_data.go b/engine/common/rpc/convert/execution_data.go index 21d2297e16a..100a00c37fe 100644 --- a/engine/common/rpc/convert/execution_data.go +++ b/engine/common/rpc/convert/execution_data.go @@ -10,11 +10,11 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" ) // BlockExecutionDataToMessage converts a BlockExecutionData to a protobuf message -func BlockExecutionDataToMessage(data *execution_data.BlockExecutionData) ( +func BlockExecutionDataToMessage(data *model.BlockExecutionData) ( *entities.BlockExecutionData, error, ) { @@ -36,11 +36,11 @@ func BlockExecutionDataToMessage(data *execution_data.BlockExecutionData) ( func MessageToBlockExecutionData( m *entities.BlockExecutionData, chain flow.Chain, -) (*execution_data.BlockExecutionData, error) { +) (*model.BlockExecutionData, error) { if m == nil { return nil, ErrEmptyMessage } - chunks := make([]*execution_data.ChunkExecutionData, len(m.ChunkExecutionData)) + chunks := make([]*model.ChunkExecutionData, len(m.ChunkExecutionData)) for i, chunk := range m.GetChunkExecutionData() { convertedChunk, err := MessageToChunkExecutionData(chunk, chain) if err != nil { @@ -49,14 +49,14 @@ func MessageToBlockExecutionData( chunks[i] = convertedChunk } - return &execution_data.BlockExecutionData{ + return &model.BlockExecutionData{ BlockID: MessageToIdentifier(m.GetBlockId()), ChunkExecutionDatas: chunks, }, nil } // ChunkExecutionDataToMessage converts a ChunkExecutionData to a protobuf message -func ChunkExecutionDataToMessage(data *execution_data.ChunkExecutionData) ( +func ChunkExecutionDataToMessage(data *model.ChunkExecutionData) ( *entities.ChunkExecutionData, error, ) { @@ -88,7 +88,7 @@ func ChunkExecutionDataToMessage(data *execution_data.ChunkExecutionData) ( func MessageToChunkExecutionData( m *entities.ChunkExecutionData, chain flow.Chain, -) (*execution_data.ChunkExecutionData, error) { +) (*model.ChunkExecutionData, error) { collection, 
err := messageToTrustedCollection(m.GetCollection(), chain) if err != nil { return nil, err @@ -107,7 +107,7 @@ func MessageToChunkExecutionData( events = nil } - return &execution_data.ChunkExecutionData{ + return &model.ChunkExecutionData{ Collection: collection, Events: events, TrieUpdate: trieUpdate, diff --git a/engine/common/rpc/convert/execution_data_test.go b/engine/common/rpc/convert/execution_data_test.go index 59c136c5b8a..73ba51a2ce2 100644 --- a/engine/common/rpc/convert/execution_data_test.go +++ b/engine/common/rpc/convert/execution_data_test.go @@ -9,7 +9,7 @@ import ( "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/ledger/common/testutils" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/utils/unittest" ) @@ -20,7 +20,7 @@ func TestConvertBlockExecutionData(t *testing.T) { events := unittest.EventsFixture(5) chunks := 5 - chunkData := make([]*execution_data.ChunkExecutionData, 0, chunks) + chunkData := make([]*model.ChunkExecutionData, 0, chunks) for i := 0; i < chunks-1; i++ { ced := unittest.ChunkExecutionDataFixture(t, 0, // updates set explicitly to target 160-320KB per chunk @@ -29,7 +29,7 @@ func TestConvertBlockExecutionData(t *testing.T) { ) chunkData = append(chunkData, ced) } - makeServiceTx := func(ced *execution_data.ChunkExecutionData) { + makeServiceTx := func(ced *model.ChunkExecutionData) { // proposal key and payer are empty addresses for service tx collection := unittest.CollectionFixture(1) collection.Transactions[0].ProposalKey.Address = flow.EmptyAddress @@ -39,7 +39,7 @@ func TestConvertBlockExecutionData(t *testing.T) { // the service chunk sometimes does not have any trie updates ced.TrieUpdate = nil } - chunk := unittest.ChunkExecutionDataFixture(t, execution_data.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events), makeServiceTx) + chunk := unittest.ChunkExecutionDataFixture(t, model.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events), makeServiceTx) chunkData = append(chunkData, chunk) blockData := unittest.BlockExecutionDataFixture(unittest.WithChunkExecutionDatas(chunkData...)) diff --git a/engine/execution/block_result.go b/engine/execution/block_result.go index cdb6e3d54f2..30e999d2047 100644 --- a/engine/execution/block_result.go +++ b/engine/execution/block_result.go @@ -3,7 +3,7 @@ package execution import ( "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/mempool/entity" ) @@ -104,7 +104,7 @@ type BlockAttestationResult struct { // should be available as part of computation result and most likely trieUpdate // was the reason this is kept here, long term we don't need this data and should // act based on register deltas - *execution_data.BlockExecutionData + *model.BlockExecutionData } func NewEmptyBlockAttestationResult( @@ -114,10 +114,10 @@ func NewEmptyBlockAttestationResult( return &BlockAttestationResult{ BlockExecutionResult: blockExecutionResult, collectionAttestationResults: make([]CollectionAttestationResult, 0, colSize), - BlockExecutionData: &execution_data.BlockExecutionData{ + BlockExecutionData: &model.BlockExecutionData{ BlockID: blockExecutionResult.ID(), ChunkExecutionDatas: make( - 
[]*execution_data.ChunkExecutionData, + []*model.ChunkExecutionData, 0, colSize), }, @@ -137,7 +137,7 @@ func (ar *BlockAttestationResult) AppendCollectionAttestationResult( endStateCommit flow.StateCommitment, stateProof flow.StorageProof, eventCommit flow.Identifier, - chunkExecutionDatas *execution_data.ChunkExecutionData, + chunkExecutionDatas *model.ChunkExecutionData, ) { ar.collectionAttestationResults = append(ar.collectionAttestationResults, CollectionAttestationResult{ diff --git a/engine/execution/computation/computer/result_collector.go b/engine/execution/computation/computer/result_collector.go index 8f5631252c2..564a78df438 100644 --- a/engine/execution/computation/computer/result_collector.go +++ b/engine/execution/computation/computer/result_collector.go @@ -19,7 +19,7 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/executiondatasync/provider" "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/module/trace" @@ -156,7 +156,7 @@ func (collector *resultCollector) commitCollection( } col := collection.Collection() - chunkExecData := &execution_data.ChunkExecutionData{ + chunkExecData := &model.ChunkExecutionData{ Collection: &col, Events: events, TrieUpdate: trieUpdate, diff --git a/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go b/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go index 491307705eb..702ffcb6339 100644 --- a/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go +++ b/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go @@ -9,8 +9,8 @@ import ( "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" executionDataMock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/module/metrics" @@ -112,7 +112,7 @@ func Test_ReconstructComputationResultFromStorage(t *testing.T) { testTrieUpdate := &ledger.TrieUpdate{ RootHash: testTrieUpdateRootHash, } - testChunkExecutionDatas := []*execution_data.ChunkExecutionData{ + testChunkExecutionDatas := []*model.ChunkExecutionData{ { TrieUpdate: testTrieUpdate, }, @@ -170,7 +170,7 @@ func Test_ReconstructComputationResultFromStorage(t *testing.T) { mockExecutionDataDowloader := new(executionDataMock.Downloader) mockExecutionDataDowloader.On("Get", mock.Anything, testEDID).Return( - &execution_data.BlockExecutionData{ + &model.BlockExecutionData{ BlockID: testBlockID, ChunkExecutionDatas: testChunkExecutionDatas, }, nil) @@ -260,9 +260,9 @@ func createTestBadgerRetryableUploaderWrapper(asyncUploader *AsyncUploader) *Bad mockExecutionDataDowloader := new(executionDataMock.Downloader) mockExecutionDataDowloader.On("Get", mock.Anything, mock.Anything).Return( - &execution_data.BlockExecutionData{ + &model.BlockExecutionData{ BlockID: flow.ZeroID, - ChunkExecutionDatas: make([]*execution_data.ChunkExecutionData, 0), + ChunkExecutionDatas: make([]*model.ChunkExecutionData, 0), }, nil) return NewBadgerRetryableUploaderWrapper( diff --git a/engine/execution/state/unittest/fixtures.go b/engine/execution/state/unittest/fixtures.go 
index 117f9e7ed19..dbe29e25576 100644 --- a/engine/execution/state/unittest/fixtures.go +++ b/engine/execution/state/unittest/fixtures.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/fvm/storage/snapshot" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/mempool/entity" "github.com/onflow/flow-go/utils/unittest" ) @@ -45,7 +46,7 @@ func ComputationResultForBlockFixture( computationResult := execution.NewEmptyComputationResult(completeBlock) numberOfChunks := len(collections) + 1 - ceds := make([]*execution_data.ChunkExecutionData, numberOfChunks) + ceds := make([]*model.ChunkExecutionData, numberOfChunks) for i := 0; i < numberOfChunks; i++ { ceds[i] = unittest.ChunkExecutionDataFixture(t, 1024) computationResult.CollectionExecutionResultAt(i).UpdateExecutionSnapshot(StateInteractionsFixture()) diff --git a/engine/execution/testutil/fixtures.go b/engine/execution/testutil/fixtures.go index 3113f2df9af..8e10da57708 100644 --- a/engine/execution/testutil/fixtures.go +++ b/engine/execution/testutil/fixtures.go @@ -26,7 +26,7 @@ import ( "github.com/onflow/flow-go/ledger/complete" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/epochs" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/state/protocol" protocolMock "github.com/onflow/flow-go/state/protocol/mock" "github.com/onflow/flow-go/utils/unittest" @@ -610,8 +610,8 @@ func ComputationResultFixture(t *testing.T) *execution.ComputationResult { return &execution.ComputationResult{ BlockExecutionResult: blockExecResult, BlockAttestationResult: &execution.BlockAttestationResult{ - BlockExecutionData: &execution_data.BlockExecutionData{ - ChunkExecutionDatas: []*execution_data.ChunkExecutionData{ + BlockExecutionData: &model.BlockExecutionData{ + ChunkExecutionDatas: []*model.ChunkExecutionData{ {TrieUpdate: trieUpdate1}, {TrieUpdate: trieUpdate2}, {TrieUpdate: trieUpdate3}, diff --git a/follower/follower_builder.go b/follower/follower_builder.go index 2b32040c5b1..01c7c0a7610 100644 --- a/follower/follower_builder.go +++ b/follower/follower_builder.go @@ -36,6 +36,7 @@ import ( "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/local" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/module/upstream" "github.com/onflow/flow-go/network" alspmgr "github.com/onflow/flow-go/network/alsp/manager" @@ -50,7 +51,6 @@ import ( "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/p2p/p2pbuilder" p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet" "github.com/onflow/flow-go/network/p2p/subscription" "github.com/onflow/flow-go/network/p2p/tracer" @@ -63,6 +63,7 @@ import ( badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" "github.com/onflow/flow-go/state/protocol/events/gadgets" + "github.com/onflow/flow-go/utils/p2plogging" ) // FlowBuilder extends cmd.NodeBuilder and declares additional functions needed to bootstrap an Access node @@ -242,7 +243,7 @@ func (builder *FollowerServiceBuilder) buildFollowerEngine() *FollowerServiceBui 
builder.Component("follower engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) } packer := hotsignature.NewConsensusSigDataPacker(builder.Committee) @@ -597,7 +598,7 @@ func (builder *FollowerServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr p2pconfig.PeerManagerDisableConfig(), // disable peer manager for follower &p2p.DisallowListCacheConfig{ MaxSize: builder.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, - Metrics: metrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), + Metrics: networkmetrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), }, meshTracer). SetSubscriptionFilter( @@ -672,9 +673,9 @@ func (builder *FollowerServiceBuilder) enqueuePublicNetworkInit() { Component("public network", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { receiveCache := netcache.NewHeroReceiveCache(builder.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, builder.Logger, - metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) + networkmetrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) - err := node.Metrics.Mempool.Register(metrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) + err := node.Metrics.Mempool.Register(networkmetrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) if err != nil { return nil, fmt.Errorf("could not register networking receive cache metric: %w", err) } diff --git a/insecure/cmd/corrupted_builder.go b/insecure/cmd/corrupted_builder.go index 6ceaa1b1a2a..df4ec583b65 100644 --- a/insecure/cmd/corrupted_builder.go +++ b/insecure/cmd/corrupted_builder.go @@ -11,7 +11,7 @@ import ( "github.com/onflow/flow-go/insecure/corruptnet" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/connection" @@ -107,7 +107,7 @@ func (cnb *CorruptedNodeBuilder) enqueueNetworkingLayer() { cnb.FlowConfig.NetworkConfig, &p2p.DisallowListCacheConfig{ MaxSize: cnb.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, - Metrics: metrics.DisallowListCacheMetricsFactory(cnb.HeroCacheMetricsFactory(), network.PrivateNetwork), + Metrics: networkmetrics.DisallowListCacheMetricsFactory(cnb.HeroCacheMetricsFactory(), network.PrivateNetwork), }, cnb.TopicValidatorDisabled, cnb.WithPubSubMessageSigning, diff --git a/insecure/corruptlibp2p/pubsub_adapter.go b/insecure/corruptlibp2p/pubsub_adapter.go index 64975a18e3c..54271264d7a 100644 --- a/insecure/corruptlibp2p/pubsub_adapter.go +++ b/insecure/corruptlibp2p/pubsub_adapter.go @@ -15,8 +15,8 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ) // CorruptGossipSubAdapter is a wrapper around the forked pubsub topic from diff 
--git a/module/chunks/chunkVerifier.go b/module/chunks/chunkVerifier.go index 88453f594b4..017e768ef7f 100644 --- a/module/chunks/chunkVerifier.go +++ b/module/chunks/chunkVerifier.go @@ -20,6 +20,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/executiondatasync/provider" ) @@ -344,7 +345,7 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext( // 2. build our chunk's chunk execution data using the locally calculated values, and calculate // its CID - chunkExecutionData := execution_data.ChunkExecutionData{ + chunkExecutionData := model.ChunkExecutionData{ Collection: cedCollection, Events: events, TrieUpdate: trieUpdate, diff --git a/module/chunks/chunkVerifier_test.go b/module/chunks/chunkVerifier_test.go index b10b4da33b8..fdcac7344d5 100644 --- a/module/chunks/chunkVerifier_test.go +++ b/module/chunks/chunkVerifier_test.go @@ -28,6 +28,7 @@ import ( "github.com/onflow/flow-go/model/verification" "github.com/onflow/flow-go/module/chunks" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/executiondatasync/provider" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" @@ -335,7 +336,7 @@ func updateExecutionData(t *testing.T, vch *verification.VerifiableChunkData, co require.NoError(t, err) } - ced := execution_data.ChunkExecutionData{ + ced := model.ChunkExecutionData{ Collection: collection, Events: chunkEvents, TrieUpdate: trieUpdate, @@ -478,7 +479,7 @@ func GetBaselineVerifiableChunk(t *testing.T, script string, system bool) (*veri trieUpdate, err := pathfinder.UpdateToTrieUpdate(update, partial.DefaultPathFinderVersion) require.NoError(t, err) - chunkExecutionData := execution_data.ChunkExecutionData{ + chunkExecutionData := model.ChunkExecutionData{ Collection: &coll, Events: chunkEvents, TrieUpdate: trieUpdate, diff --git a/module/executiondatasync/execution_data/cache/cache.go b/module/executiondatasync/execution_data/cache/cache.go index bfe497aac82..afe7559c5f7 100644 --- a/module/executiondatasync/execution_data/cache/cache.go +++ b/module/executiondatasync/execution_data/cache/cache.go @@ -6,6 +6,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/storage" ) @@ -44,13 +45,13 @@ func NewExecutionDataCache( // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore // - MalformedDataError if some level of the blob tree cannot be properly deserialized // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size -func (c *ExecutionDataCache) ByID(ctx context.Context, executionDataID flow.Identifier) (*execution_data.BlockExecutionDataEntity, error) { +func (c *ExecutionDataCache) ByID(ctx context.Context, executionDataID flow.Identifier) (*model.BlockExecutionDataEntity, error) { execData, err := c.backend.Get(ctx, executionDataID) if err != nil { return nil, err } - return execution_data.NewBlockExecutionDataEntity(executionDataID, execData), nil + return model.NewBlockExecutionDataEntity(executionDataID, execData), 
nil } // ByBlockID returns the execution data for the given block ID. @@ -60,7 +61,7 @@ func (c *ExecutionDataCache) ByID(ctx context.Context, executionDataID flow.Iden // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore // - MalformedDataError if some level of the blob tree cannot be properly deserialized // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size -func (c *ExecutionDataCache) ByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionDataEntity, error) { +func (c *ExecutionDataCache) ByBlockID(ctx context.Context, blockID flow.Identifier) (*model.BlockExecutionDataEntity, error) { if execData, ok := c.cache.ByID(blockID); ok { return execData, nil } @@ -75,7 +76,7 @@ func (c *ExecutionDataCache) ByBlockID(ctx context.Context, blockID flow.Identif return nil, err } - execDataEntity := execution_data.NewBlockExecutionDataEntity(executionDataID, execData) + execDataEntity := model.NewBlockExecutionDataEntity(executionDataID, execData) _ = c.cache.Add(execDataEntity) @@ -89,7 +90,7 @@ func (c *ExecutionDataCache) ByBlockID(ctx context.Context, blockID flow.Identif // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore // - MalformedDataError if some level of the blob tree cannot be properly deserialized // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size -func (c *ExecutionDataCache) ByHeight(ctx context.Context, height uint64) (*execution_data.BlockExecutionDataEntity, error) { +func (c *ExecutionDataCache) ByHeight(ctx context.Context, height uint64) (*model.BlockExecutionDataEntity, error) { blockID, err := c.headers.BlockIDByHeight(height) if err != nil { return nil, err diff --git a/module/executiondatasync/execution_data/downloader.go b/module/executiondatasync/execution_data/downloader.go index 71905342c33..2befa0a0745 100644 --- a/module/executiondatasync/execution_data/downloader.go +++ b/module/executiondatasync/execution_data/downloader.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/network" ) @@ -42,7 +43,7 @@ func WithSerializer(serializer Serializer) DownloaderOption { func NewDownloader(blobService network.BlobService, opts ...DownloaderOption) *downloader { d := &downloader{ blobService, - DefaultMaxBlobSize, + model.DefaultMaxBlobSize, DefaultSerializer, } @@ -69,7 +70,7 @@ func (d *downloader) Done() <-chan struct{} { // - BlobNotFoundError if some CID in the blob tree could not be found from the blob service // - MalformedDataError if some level of the blob tree cannot be properly deserialized // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size -func (d *downloader) Get(ctx context.Context, executionDataID flow.Identifier) (*BlockExecutionData, error) { +func (d *downloader) Get(ctx context.Context, executionDataID flow.Identifier) (*model.BlockExecutionData, error) { blobGetter := d.blobService.GetSession(ctx) // First, download the root execution data record which contains a list of chunk execution data @@ -82,7 +83,7 @@ func (d *downloader) Get(ctx context.Context, executionDataID flow.Identifier) ( g, gCtx := errgroup.WithContext(ctx) // Next, download each of the chunk execution data blobs - chunkExecutionDatas := 
make([]*ChunkExecutionData, len(edRoot.ChunkExecutionDataIDs)) + chunkExecutionDatas := make([]*model.ChunkExecutionData, len(edRoot.ChunkExecutionDataIDs)) for i, chunkDataID := range edRoot.ChunkExecutionDataIDs { i := i chunkDataID := chunkDataID @@ -109,7 +110,7 @@ func (d *downloader) Get(ctx context.Context, executionDataID flow.Identifier) ( } // Finally, recombine data into original record. - bed := &BlockExecutionData{ + bed := &model.BlockExecutionData{ BlockID: edRoot.BlockID, ChunkExecutionDatas: chunkExecutionDatas, } @@ -171,7 +172,7 @@ func (d *downloader) getChunkExecutionData( ctx context.Context, chunkExecutionDataID cid.Cid, blobGetter network.BlobGetter, -) (*ChunkExecutionData, error) { +) (*model.ChunkExecutionData, error) { cids := []cid.Cid{chunkExecutionDataID} // iteratively process each level of the blob tree until a ChunkExecutionData is returned or an @@ -183,7 +184,7 @@ func (d *downloader) getChunkExecutionData( } switch v := v.(type) { - case *ChunkExecutionData: + case *model.ChunkExecutionData: return v, nil case *[]cid.Cid: cids = *v diff --git a/module/executiondatasync/execution_data/downloader_test.go b/module/executiondatasync/execution_data/downloader_test.go index 775f4a68107..503b665e356 100644 --- a/module/executiondatasync/execution_data/downloader_test.go +++ b/module/executiondatasync/execution_data/downloader_test.go @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/network/mocknetwork" ) @@ -22,7 +23,7 @@ func TestCIDNotFound(t *testing.T) { blobService := new(mocknetwork.BlobService) downloader := execution_data.NewDownloader(blobService) edStore := execution_data.NewExecutionDataStore(blobstore, execution_data.DefaultSerializer) - bed := generateBlockExecutionData(t, 10, 3*execution_data.DefaultMaxBlobSize) + bed := generateBlockExecutionData(t, 10, 3*model.DefaultMaxBlobSize) edID, err := edStore.Add(context.Background(), bed) require.NoError(t, err) diff --git a/module/executiondatasync/execution_data/mock/downloader.go b/module/executiondatasync/execution_data/mock/downloader.go index dfeafeeffbe..b03181e8c06 100644 --- a/module/executiondatasync/execution_data/mock/downloader.go +++ b/module/executiondatasync/execution_data/mock/downloader.go @@ -6,7 +6,7 @@ import ( context "context" flow "github.com/onflow/flow-go/model/flow" - execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" mock "github.com/stretchr/testify/mock" ) diff --git a/module/executiondatasync/execution_data/mock/execution_data_store.go b/module/executiondatasync/execution_data/mock/execution_data_store.go index c11c0f1cbce..cbb9ea1d3ee 100644 --- a/module/executiondatasync/execution_data/mock/execution_data_store.go +++ b/module/executiondatasync/execution_data/mock/execution_data_store.go @@ -6,7 +6,7 @@ import ( context "context" flow "github.com/onflow/flow-go/model/flow" - execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" mock "github.com/stretchr/testify/mock" ) diff --git a/module/executiondatasync/execution_data/entity.go b/module/executiondatasync/execution_data/model/entity.go similarity index 97% rename from 
module/executiondatasync/execution_data/entity.go rename to module/executiondatasync/execution_data/model/entity.go index 6facd5ad580..2a79a59c11e 100644 --- a/module/executiondatasync/execution_data/entity.go +++ b/module/executiondatasync/execution_data/model/entity.go @@ -1,4 +1,4 @@ -package execution_data +package model import ( "github.com/onflow/flow-go/model/flow" diff --git a/module/executiondatasync/execution_data/execution_data.go b/module/executiondatasync/execution_data/model/execution_data.go similarity index 97% rename from module/executiondatasync/execution_data/execution_data.go rename to module/executiondatasync/execution_data/model/execution_data.go index fb917c27ef2..56ef01bc7ae 100644 --- a/module/executiondatasync/execution_data/execution_data.go +++ b/module/executiondatasync/execution_data/model/execution_data.go @@ -1,4 +1,4 @@ -package execution_data +package model import ( "github.com/onflow/flow-go/ledger" diff --git a/module/executiondatasync/execution_data/serializer.go b/module/executiondatasync/execution_data/serializer.go index a5dfa60252c..9bf21bd661e 100644 --- a/module/executiondatasync/execution_data/serializer.go +++ b/module/executiondatasync/execution_data/serializer.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/model/encoding" "github.com/onflow/flow-go/model/encoding/cbor" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/compressor" ) @@ -53,7 +54,7 @@ func getCode(v interface{}) (byte, error) { switch v.(type) { case *flow.BlockExecutionDataRoot: return codeExecutionDataRoot, nil - case *ChunkExecutionData: + case *model.ChunkExecutionData: return codeChunkExecutionData, nil case []cid.Cid: return codeRecursiveCIDs, nil @@ -69,7 +70,7 @@ func getPrototype(code byte) (interface{}, error) { case codeExecutionDataRoot: return &flow.BlockExecutionDataRoot{}, nil case codeChunkExecutionData: - return &ChunkExecutionData{}, nil + return &model.ChunkExecutionData{}, nil case codeRecursiveCIDs: return &[]cid.Cid{}, nil default: diff --git a/module/executiondatasync/execution_data/store.go b/module/executiondatasync/execution_data/store.go index 8d31a8a0c4f..a72e9590306 100644 --- a/module/executiondatasync/execution_data/store.go +++ b/module/executiondatasync/execution_data/store.go @@ -10,6 +10,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" ) // ExecutionDataGetter handles getting execution data from a blobstore @@ -19,7 +20,7 @@ type ExecutionDataGetter interface { // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore // - MalformedDataError if some level of the blob tree cannot be properly deserialized // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size - Get(ctx context.Context, rootID flow.Identifier) (*BlockExecutionData, error) + Get(ctx context.Context, rootID flow.Identifier) (*model.BlockExecutionData, error) } // ExecutionDataStore handles adding / getting execution data to / from a blobstore @@ -29,7 +30,7 @@ type ExecutionDataStore interface { // Add constructs a blob tree for the given BlockExecutionData, adds it to the blobstore, // then returns the root CID. // No errors are expected during normal operation. 
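// A minimal round-trip sketch (illustrative only; assumes a blobs.Blobstore and
// this package's DefaultSerializer, with error handling elided):
//
//	store := NewExecutionDataStore(blobstore, DefaultSerializer)
//	rootID, err := store.Add(ctx, executionData)
//	recovered, err := store.Get(ctx, rootID)
//
// Add and Get act as inverses: Get resolves the root ID produced by Add back
// into the full *model.BlockExecutionData.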
- Add(ctx context.Context, executionData *BlockExecutionData) (flow.Identifier, error) + Add(ctx context.Context, executionData *model.BlockExecutionData) (flow.Identifier, error) } type ExecutionDataStoreOption func(*store) @@ -54,7 +55,7 @@ func NewExecutionDataStore(blobstore blobs.Blobstore, serializer Serializer, opt s := &store{ blobstore: blobstore, serializer: serializer, - maxBlobSize: DefaultMaxBlobSize, + maxBlobSize: model.DefaultMaxBlobSize, } for _, opt := range opts { @@ -67,7 +68,7 @@ func NewExecutionDataStore(blobstore blobs.Blobstore, serializer Serializer, opt // Add constructs a blob tree for the given BlockExecutionData, adds it to the blobstore, // then returns the rootID. // No errors are expected during normal operation. -func (s *store) Add(ctx context.Context, executionData *BlockExecutionData) (flow.Identifier, error) { +func (s *store) Add(ctx context.Context, executionData *model.BlockExecutionData) (flow.Identifier, error) { executionDataRoot := &flow.BlockExecutionDataRoot{ BlockID: executionData.BlockID, ChunkExecutionDataIDs: make([]cid.Cid, len(executionData.ChunkExecutionDatas)), @@ -114,7 +115,7 @@ func (s *store) Add(ctx context.Context, executionData *BlockExecutionData) (flo // addChunkExecutionData constructs a blob tree for the given ChunkExecutionData, adds it to the // blobstore, and returns the root CID. // No errors are expected during normal operation. -func (s *store) addChunkExecutionData(ctx context.Context, chunkExecutionData *ChunkExecutionData) (cid.Cid, error) { +func (s *store) addChunkExecutionData(ctx context.Context, chunkExecutionData *model.ChunkExecutionData) (cid.Cid, error) { var v interface{} = chunkExecutionData // given an arbitrarily large v, split it into blobs of size up to maxBlobSize, adding them to @@ -177,7 +178,7 @@ func (s *store) addBlobs(ctx context.Context, v interface{}) ([]cid.Cid, error) // Expected errors during normal operations: // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore // - MalformedDataError if some level of the blob tree cannot be properly deserialized -func (s *store) Get(ctx context.Context, rootID flow.Identifier) (*BlockExecutionData, error) { +func (s *store) Get(ctx context.Context, rootID flow.Identifier) (*model.BlockExecutionData, error) { rootCid := flow.IdToCid(rootID) // first, get the root blob. 
it will contain a list of blobs, one for each chunk @@ -201,9 +202,9 @@ func (s *store) Get(ctx context.Context, rootID flow.Identifier) (*BlockExecutio } // next, get each chunk blob and deserialize it - blockExecutionData := &BlockExecutionData{ + blockExecutionData := &model.BlockExecutionData{ BlockID: executionDataRoot.BlockID, - ChunkExecutionDatas: make([]*ChunkExecutionData, len(executionDataRoot.ChunkExecutionDataIDs)), + ChunkExecutionDatas: make([]*model.ChunkExecutionData, len(executionDataRoot.ChunkExecutionDataIDs)), } for i, chunkExecutionDataID := range executionDataRoot.ChunkExecutionDataIDs { @@ -222,7 +223,7 @@ func (s *store) Get(ctx context.Context, rootID flow.Identifier) (*BlockExecutio // Expected errors during normal operations: // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore // - MalformedDataError if some level of the blob tree cannot be properly deserialized -func (s *store) getChunkExecutionData(ctx context.Context, chunkExecutionDataID cid.Cid) (*ChunkExecutionData, error) { +func (s *store) getChunkExecutionData(ctx context.Context, chunkExecutionDataID cid.Cid) (*model.ChunkExecutionData, error) { cids := []cid.Cid{chunkExecutionDataID} // given a root CID, get the blob tree level by level, until we reach the full ChunkExecutionData @@ -233,7 +234,7 @@ func (s *store) getChunkExecutionData(ctx context.Context, chunkExecutionDataID } switch v := v.(type) { - case *ChunkExecutionData: + case *model.ChunkExecutionData: return v, nil case *[]cid.Cid: cids = *v diff --git a/module/executiondatasync/execution_data/store_test.go b/module/executiondatasync/execution_data/store_test.go index f1784201766..ab6fe3c0130 100644 --- a/module/executiondatasync/execution_data/store_test.go +++ b/module/executiondatasync/execution_data/store_test.go @@ -20,6 +20,7 @@ import ( "github.com/onflow/flow-go/ledger/common/testutils" "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/utils/unittest" ) @@ -31,8 +32,8 @@ func getExecutionDataStore(blobstore blobs.Blobstore, serializer execution_data. 
return execution_data.NewExecutionDataStore(blobstore, serializer) } -func generateChunkExecutionData(t *testing.T, minSerializedSize uint64) *execution_data.ChunkExecutionData { - ced := &execution_data.ChunkExecutionData{ +func generateChunkExecutionData(t *testing.T, minSerializedSize uint64) *model.ChunkExecutionData { + ced := &model.ChunkExecutionData{ TrieUpdate: testutils.TrieUpdateFixture(1, 1, 8), } @@ -58,10 +59,10 @@ func generateChunkExecutionData(t *testing.T, minSerializedSize uint64) *executi } } -func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *execution_data.BlockExecutionData { - bed := &execution_data.BlockExecutionData{ +func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *model.BlockExecutionData { + bed := &model.BlockExecutionData{ BlockID: unittest.IdentifierFixture(), - ChunkExecutionDatas: make([]*execution_data.ChunkExecutionData, numChunks), + ChunkExecutionDatas: make([]*model.ChunkExecutionData, numChunks), } for i := 0; i < numChunks; i++ { @@ -84,7 +85,7 @@ func getAllKeys(t *testing.T, bs blobs.Blobstore) []cid.Cid { return cids } -func deepEqual(t *testing.T, expected, actual *execution_data.BlockExecutionData) { +func deepEqual(t *testing.T, expected, actual *model.BlockExecutionData) { assert.Equal(t, expected.BlockID, actual.BlockID) assert.Equal(t, len(expected.ChunkExecutionDatas), len(actual.ChunkExecutionDatas)) @@ -111,8 +112,8 @@ func TestHappyPath(t *testing.T) { deepEqual(t, expected, actual) } - test(1, 0) // small execution data (single level blob tree) - test(5, 5*execution_data.DefaultMaxBlobSize) // large execution data (multi level blob tree) + test(1, 0) // small execution data (single level blob tree) + test(5, 5*model.DefaultMaxBlobSize) // large execution data (multi level blob tree) } type randomSerializer struct{} @@ -140,7 +141,7 @@ func newCorruptedTailSerializer(numChunks int) *corruptedTailSerializer { } func (cts *corruptedTailSerializer) Serialize(w io.Writer, v interface{}) error { - if _, ok := v.(*execution_data.ChunkExecutionData); ok { + if _, ok := v.(*model.ChunkExecutionData); ok { cts.i++ if cts.i == cts.corruptedChunk { buf := &bytes.Buffer{} @@ -168,7 +169,7 @@ func (cts *corruptedTailSerializer) Deserialize(r io.Reader) (interface{}, error func TestMalformedData(t *testing.T) { t.Parallel() - test := func(bed *execution_data.BlockExecutionData, serializer execution_data.Serializer) { + test := func(bed *model.BlockExecutionData, serializer execution_data.Serializer) { blobstore := getBlobstore() defaultEds := getExecutionDataStore(blobstore, execution_data.DefaultSerializer) malformedEds := getExecutionDataStore(blobstore, serializer) @@ -179,7 +180,7 @@ func TestMalformedData(t *testing.T) { } numChunks := 5 - bed := generateBlockExecutionData(t, numChunks, 10*execution_data.DefaultMaxBlobSize) + bed := generateBlockExecutionData(t, numChunks, 10*model.DefaultMaxBlobSize) test(bed, &randomSerializer{}) // random bytes test(bed, newCorruptedTailSerializer(numChunks)) // serialized execution data with random bytes replaced at the end of a random chunk @@ -191,7 +192,7 @@ func TestGetIncompleteData(t *testing.T) { blobstore := getBlobstore() eds := getExecutionDataStore(blobstore, execution_data.DefaultSerializer) - bed := generateBlockExecutionData(t, 5, 10*execution_data.DefaultMaxBlobSize) + bed := generateBlockExecutionData(t, 5, 10*model.DefaultMaxBlobSize) rootID, err := eds.Add(context.Background(), bed) 
require.NoError(t, err) diff --git a/module/executiondatasync/execution_data/util.go b/module/executiondatasync/execution_data/util.go index 50582d19840..cf795d4a57b 100644 --- a/module/executiondatasync/execution_data/util.go +++ b/module/executiondatasync/execution_data/util.go @@ -5,11 +5,12 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" ) // CalculateID calculates the root ID of the given execution data without storing any data. // No errors are expected during normal operation. -func CalculateID(ctx context.Context, execData *BlockExecutionData, serializer Serializer) (flow.Identifier, error) { +func CalculateID(ctx context.Context, execData *model.BlockExecutionData, serializer Serializer) (flow.Identifier, error) { executionDatastore := NewExecutionDataStore(blobs.NewNoopBlobstore(), serializer) id, err := executionDatastore.Add(ctx, execData) diff --git a/module/executiondatasync/provider/provider.go b/module/executiondatasync/provider/provider.go index ac5c3fe700d..9d9f80c9329 100644 --- a/module/executiondatasync/provider/provider.go +++ b/module/executiondatasync/provider/provider.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/executiondatasync/tracker" "github.com/onflow/flow-go/network" ) @@ -29,7 +30,7 @@ func WithBlobSizeLimit(size int) ProviderOption { // Provider is used to provide execution data blobs over the network via a blob service. type Provider interface { - Provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, error) + Provide(ctx context.Context, blockHeight uint64, executionData *model.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, error) } type ExecutionDataProvider struct { @@ -54,7 +55,7 @@ func NewProvider( p := &ExecutionDataProvider{ logger: logger.With().Str("component", "execution_data_provider").Logger(), metrics: metrics, - maxBlobSize: execution_data.DefaultMaxBlobSize, + maxBlobSize: model.DefaultMaxBlobSize, cidsProvider: NewExecutionDataCIDProvider(serializer), blobService: blobService, storage: storage, @@ -123,7 +124,7 @@ func (p *ExecutionDataProvider) storeBlobs(parent context.Context, blockHeight u // It computes and returns the root CID of the execution data blob tree. // This function returns once the root CID has been computed, and all blobs are successfully stored // in the Bitswap Blobstore. 
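// Hypothetical caller sketch (every name other than Provide is an assumption):
//
//	rootID, edRoot, err := provider.Provide(ctx, header.Height, blockExecutionData)
//	if err != nil {
//		// blobs may be partially stored; callers would likely treat this as fatal
//	}
//
// The returned rootID is the identifier that a downloader later passes to Get
// to fetch the same execution data over the network.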
-func (p *ExecutionDataProvider) Provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, error) { +func (p *ExecutionDataProvider) Provide(ctx context.Context, blockHeight uint64, executionData *model.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, error) { rootID, rootData, errCh, err := p.provide(ctx, blockHeight, executionData) storeErr, ok := <-errCh @@ -142,7 +143,7 @@ func (p *ExecutionDataProvider) Provide(ctx context.Context, blockHeight uint64, return rootID, rootData, nil } -func (p *ExecutionDataProvider) provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, <-chan error, error) { +func (p *ExecutionDataProvider) provide(ctx context.Context, blockHeight uint64, executionData *model.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, <-chan error, error) { logger := p.logger.With().Uint64("height", blockHeight).Str("block_id", executionData.BlockID.String()).Logger() logger.Debug().Msg("providing execution data") @@ -195,7 +196,7 @@ func (p *ExecutionDataProvider) provide(ctx context.Context, blockHeight uint64, func NewExecutionDataCIDProvider(serializer execution_data.Serializer) *ExecutionDataCIDProvider { return &ExecutionDataCIDProvider{ serializer: serializer, - maxBlobSize: execution_data.DefaultMaxBlobSize, + maxBlobSize: model.DefaultMaxBlobSize, } } @@ -211,7 +212,7 @@ func (p *ExecutionDataCIDProvider) CalculateExecutionDataRootID( } func (p *ExecutionDataCIDProvider) CalculateChunkExecutionDataID( - ced execution_data.ChunkExecutionData, + ced model.ChunkExecutionData, ) (cid.Cid, error) { return p.addChunkExecutionData(&ced, nil) } @@ -243,7 +244,7 @@ func (p *ExecutionDataCIDProvider) addExecutionDataRoot( } func (p *ExecutionDataCIDProvider) addChunkExecutionData( - ced *execution_data.ChunkExecutionData, + ced *model.ChunkExecutionData, blobCh chan<- blobs.Blob, ) (cid.Cid, error) { cids, err := p.addBlobs(ced, blobCh) diff --git a/module/executiondatasync/provider/provider_test.go b/module/executiondatasync/provider/provider_test.go index b88033a7de1..1117bd1b8ac 100644 --- a/module/executiondatasync/provider/provider_test.go +++ b/module/executiondatasync/provider/provider_test.go @@ -17,6 +17,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/executiondatasync/provider" mocktracker "github.com/onflow/flow-go/module/executiondatasync/tracker/mock" "github.com/onflow/flow-go/module/metrics" @@ -52,8 +53,8 @@ func getProvider(blobService network.BlobService) provider.Provider { ) } -func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *execution_data.BlockExecutionData { - chunkData := make([]*execution_data.ChunkExecutionData, 0, numChunks) +func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *model.BlockExecutionData { + chunkData := make([]*model.ChunkExecutionData, 0, numChunks) for i := 0; i < numChunks; i++ { chunkData = append(chunkData, unittest.ChunkExecutionDataFixture(t, int(minSerializedSizePerChunk))) } @@ -61,7 +62,7 @@ func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePe return 
unittest.BlockExecutionDataFixture(unittest.WithChunkExecutionDatas(chunkData...)) } -func deepEqual(t *testing.T, expected, actual *execution_data.BlockExecutionData) { +func deepEqual(t *testing.T, expected, actual *model.BlockExecutionData) { assert.Equal(t, expected.BlockID, actual.BlockID) assert.Equal(t, len(expected.ChunkExecutionDatas), len(actual.ChunkExecutionDatas)) @@ -94,14 +95,14 @@ func TestHappyPath(t *testing.T) { assert.Len(t, executionDataRoot.ChunkExecutionDataIDs, numChunks) } - test(1, 0) // small execution data (single level blob tree) - test(5, 5*execution_data.DefaultMaxBlobSize) // large execution data (multi level blob tree) + test(1, 0) // small execution data (single level blob tree) + test(5, 5*model.DefaultMaxBlobSize) // large execution data (multi level blob tree) } func TestProvideContextCanceled(t *testing.T) { t.Parallel() - bed := generateBlockExecutionData(t, 5, 5*execution_data.DefaultMaxBlobSize) + bed := generateBlockExecutionData(t, 5, 5*model.DefaultMaxBlobSize) provider := getProvider(getBlobservice(getDatastore())) _, _, err := provider.Provide(context.Background(), 0, bed) @@ -151,7 +152,7 @@ func TestCalculateChunkExecutionDataID(t *testing.T) { require.NoError(t, err) expected := cid.MustParse("QmYSvEvCYCaMJXjCdWLzFYqMBzxgiE5GzEGQCKqHKM8KkP") - ced := execution_data.ChunkExecutionData{ + ced := model.ChunkExecutionData{ Collection: &flow.Collection{ Transactions: []*flow.TransactionBody{ {Script: []byte("pub fun main() {}")}, diff --git a/module/mempool/execution_data.go b/module/mempool/execution_data.go index 88d466c146b..fff3c03d238 100644 --- a/module/mempool/execution_data.go +++ b/module/mempool/execution_data.go @@ -2,7 +2,7 @@ package mempool import ( "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" ) // ExecutionData represents a concurrency-safe memory pool for BlockExecutionData. @@ -14,7 +14,7 @@ type ExecutionData interface { // Add adds a block execution data to the mempool, keyed by block ID. // It returns false if the execution data was already in the mempool. - Add(*execution_data.BlockExecutionDataEntity) bool + Add(*model.BlockExecutionDataEntity) bool // Remove removes block execution data from mempool by block ID. // It returns true if the execution data was known and removed. @@ -22,14 +22,14 @@ type ExecutionData interface { // ByID returns the block execution data for the given block ID from the mempool. // It returns false if the execution data was not found in the mempool. - ByID(flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool) + ByID(flow.Identifier) (*model.BlockExecutionDataEntity, bool) // Size returns the current size of the memory pool. Size() uint // All retrieves all execution data that are currently in the memory pool // as a slice. - All() []*execution_data.BlockExecutionDataEntity + All() []*model.BlockExecutionDataEntity // Clear removes all execution data from the mempool.
Clear() diff --git a/module/mempool/herocache/backdata/cache.go b/module/mempool/herocache/backdata/cache.go index 2ac93e38957..d353fda34a3 100644 --- a/module/mempool/herocache/backdata/cache.go +++ b/module/mempool/herocache/backdata/cache.go @@ -11,6 +11,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" + "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/logging" ) @@ -149,6 +150,12 @@ func NewCache(sizeLimit uint32, return bd } +// NewCacheWithNoopLoggerAndMetrics is a helper function to create a new Cache with a no-op logger and metrics collector. +// This is useful for lower-level components that do not need to log or collect metrics, and want to avoid import cycles. +func NewCacheWithNoopLoggerAndMetrics(sizeLimit uint32, oversizeFactor uint32, ejectionMode heropool.EjectionMode, opts ...CacheOpt) *Cache { + return NewCache(sizeLimit, oversizeFactor, ejectionMode, zerolog.Nop(), metrics.NewNoopCollector(), opts...) +} + // Has checks if backdata already contains the entity with the given identifier. func (c *Cache) Has(entityID flow.Identifier) bool { defer c.logTelemetry() diff --git a/module/mempool/herocache/execution_data.go b/module/mempool/herocache/execution_data.go index 9a075692578..a36753a2c2d 100644 --- a/module/mempool/herocache/execution_data.go +++ b/module/mempool/herocache/execution_data.go @@ -7,7 +7,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" "github.com/onflow/flow-go/module/mempool/herocache/internal" @@ -39,14 +39,14 @@ func (t *BlockExecutionData) Has(blockID flow.Identifier) bool { // Add adds a block execution data to the mempool, keyed by block ID. // It returns false if the execution data was already in the mempool. -func (t *BlockExecutionData) Add(ed *execution_data.BlockExecutionDataEntity) bool { +func (t *BlockExecutionData) Add(ed *model.BlockExecutionDataEntity) bool { entity := internal.NewWrappedEntity(ed.BlockID, ed) return t.c.Add(*entity) } // ByID returns the block execution data for the given block ID from the mempool. // It returns false if the execution data was not found in the mempool. -func (t *BlockExecutionData) ByID(blockID flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool) { +func (t *BlockExecutionData) ByID(blockID flow.Identifier) (*model.BlockExecutionDataEntity, bool) { entity, exists := t.c.ByID(blockID) if !exists { return nil, false @@ -57,9 +57,9 @@ func (t *BlockExecutionData) ByID(blockID flow.Identifier) (*execution_data.Bloc // All returns all block execution data from the mempool. Since it is using the HeroCache, All guarantees returning // all block execution data in the same order as they are added.
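// For example (illustrative only): if entities for blocks A, B and C were added
// in that order, All returns them in the order [A, B, C].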
-func (t *BlockExecutionData) All() []*execution_data.BlockExecutionDataEntity { +func (t *BlockExecutionData) All() []*model.BlockExecutionDataEntity { entities := t.c.All() - eds := make([]*execution_data.BlockExecutionDataEntity, 0, len(entities)) + eds := make([]*model.BlockExecutionDataEntity, 0, len(entities)) for _, entity := range entities { eds = append(eds, unwrap(entity)) } @@ -83,13 +83,13 @@ func (t *BlockExecutionData) Remove(blockID flow.Identifier) bool { } // unwrap converts an internal.WrappedEntity to a BlockExecutionDataEntity. -func unwrap(entity flow.Entity) *execution_data.BlockExecutionDataEntity { +func unwrap(entity flow.Entity) *model.BlockExecutionDataEntity { wrappedEntity, ok := entity.(internal.WrappedEntity) if !ok { panic(fmt.Sprintf("invalid wrapped entity in block execution data pool (%T)", entity)) } - ed, ok := wrappedEntity.Entity.(*execution_data.BlockExecutionDataEntity) + ed, ok := wrappedEntity.Entity.(*model.BlockExecutionDataEntity) if !ok { panic(fmt.Sprintf("invalid entity in block execution data pool (%T)", wrappedEntity.Entity)) } diff --git a/module/mempool/herocache/execution_data_test.go b/module/mempool/herocache/execution_data_test.go index 46c0d302956..8d32de2e081 100644 --- a/module/mempool/herocache/execution_data_test.go +++ b/module/mempool/herocache/execution_data_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/utils/unittest" @@ -70,7 +70,7 @@ func TestBlockExecutionDataConcurrentWriteAndRead(t *testing.T) { // storing all cache for i := 0; i < total; i++ { - go func(ed *execution_data.BlockExecutionDataEntity) { + go func(ed *model.BlockExecutionDataEntity) { require.True(t, cache.Add(ed)) wg.Done() @@ -83,7 +83,7 @@ func TestBlockExecutionDataConcurrentWriteAndRead(t *testing.T) { wg.Add(total) // reading all cache for i := 0; i < total; i++ { - go func(ed *execution_data.BlockExecutionDataEntity) { + go func(ed *model.BlockExecutionDataEntity) { actual, ok := cache.ByID(ed.BlockID) require.True(t, ok) require.Equal(t, ed, actual) diff --git a/module/mempool/mock/execution_data.go b/module/mempool/mock/execution_data.go index 9a9b1669daf..d25f1c24c53 100644 --- a/module/mempool/mock/execution_data.go +++ b/module/mempool/mock/execution_data.go @@ -4,7 +4,7 @@ package mempool import ( flow "github.com/onflow/flow-go/model/flow" - execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data" + execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" mock "github.com/stretchr/testify/mock" ) diff --git a/module/metrics/access.go b/module/metrics/access.go index 1116f87f433..64673a22bd7 100644 --- a/module/metrics/access.go +++ b/module/metrics/access.go @@ -6,6 +6,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/counters" + "github.com/onflow/flow-go/module/metrics/internal" ) type AccessCollectorOpts func(*AccessCollector) @@ -53,56 +54,56 @@ func NewAccessCollector(opts ...AccessCollectorOpts) *AccessCollector { ac := &AccessCollector{ connectionReused: promauto.NewCounter(prometheus.CounterOpts{ Name: "connection_reused", - Namespace: namespaceAccess, - Subsystem: subsystemConnectionPool, + Namespace: 
internal.NamespaceAccess, + Subsystem: internal.SubsystemConnectionPool, Help: "counter for the number of times connections get reused", }), connectionsInPool: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "connections_in_pool", - Namespace: namespaceAccess, - Subsystem: subsystemConnectionPool, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemConnectionPool, Help: "counter for the number of connections in the pool against the max number the pool can hold", }, []string{"result"}), connectionAdded: promauto.NewCounter(prometheus.CounterOpts{ Name: "connection_added", - Namespace: namespaceAccess, - Subsystem: subsystemConnectionPool, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemConnectionPool, Help: "counter for the number of times connections are added to the pool", }), connectionEstablished: promauto.NewCounter(prometheus.CounterOpts{ Name: "connection_established", - Namespace: namespaceAccess, - Subsystem: subsystemConnectionPool, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemConnectionPool, Help: "counter for the number of times connections are established", }), connectionInvalidated: promauto.NewCounter(prometheus.CounterOpts{ Name: "connection_invalidated", - Namespace: namespaceAccess, - Subsystem: subsystemConnectionPool, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemConnectionPool, Help: "counter for the number of times connections are invalidated", }), connectionUpdated: promauto.NewCounter(prometheus.CounterOpts{ Name: "connection_updated", - Namespace: namespaceAccess, - Subsystem: subsystemConnectionPool, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemConnectionPool, Help: "counter for the number of times existing connections from the pool are updated", }), connectionEvicted: promauto.NewCounter(prometheus.CounterOpts{ Name: "connection_evicted", - Namespace: namespaceAccess, - Subsystem: subsystemConnectionPool, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemConnectionPool, Help: "counter for the number of times a cached connection is evicted from the connection pool", }), lastFullBlockHeight: promauto.NewGauge(prometheus.GaugeOpts{ Name: "last_full_finalized_block_height", - Namespace: namespaceAccess, - Subsystem: subsystemIngestion, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemIngestion, Help: "gauge to track the highest consecutive finalized block height with all collections indexed", }), maxReceiptHeight: promauto.NewGauge(prometheus.GaugeOpts{ Name: "max_receipt_height", - Namespace: namespaceAccess, - Subsystem: subsystemIngestion, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemIngestion, Help: "gauge to track the maximum block height of execution receipts received", }), maxReceiptHeightValue: counters.NewMonotonousCounter(0), diff --git a/module/metrics/alsp.go b/module/metrics/alsp.go index 3d5dc2bc510..2c459231004 100644 --- a/module/metrics/alsp.go +++ b/module/metrics/alsp.go @@ -4,6 +4,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) // AlspMetrics is a struct that contains all the metrics related to the ALSP module.
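// Illustrative assumption (not shown in this patch): the internal.Namespace* /
// internal.Subsystem* identifiers introduced by this refactor are presumably
// plain exported string constants in module/metrics/internal, e.g.
//
//	package internal
//
//	const (
//		NamespaceAccess  = "access"
//		NamespaceNetwork = "network"
//		NamespaceStorage = "storage"
//	)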
@@ -25,8 +26,8 @@ func NewAlspMetrics() *AlspMetrics { alsp.reportedMisbehaviorCount = prometheus.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemAlsp, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemAlsp, Name: "reported_misbehavior_total", Help: "number of reported spamming misbehavior received by alsp", }, []string{LabelChannel, LabelMisbehavior}, diff --git a/module/metrics/badger.go b/module/metrics/badger.go index 4b643336170..be91f1afed4 100644 --- a/module/metrics/badger.go +++ b/module/metrics/badger.go @@ -6,37 +6,39 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" + + "github.com/onflow/flow-go/module/metrics/internal" ) func RegisterBadgerMetrics() error { expvarCol := collectors.NewExpvarCollector(map[string]*prometheus.Desc{ "badger_disk_reads_total": prometheus.NewDesc( - fmt.Sprintf("%s_%s_disk_reads_total", namespaceStorage, subsystemBadger), "cumulative number of reads", nil, nil), + fmt.Sprintf("%s_%s_disk_reads_total", internal.NamespaceStorage, internal.SubsystemBadger), "cumulative number of reads", nil, nil), "badger_disk_writes_total": prometheus.NewDesc( - fmt.Sprintf("%s_%s_disk_writes_total", namespaceStorage, subsystemBadger), "cumulative number of writes", nil, nil), + fmt.Sprintf("%s_%s_disk_writes_total", internal.NamespaceStorage, internal.SubsystemBadger), "cumulative number of writes", nil, nil), "badger_read_bytes": prometheus.NewDesc( - fmt.Sprintf("%s_%s_read_bytes", namespaceStorage, subsystemBadger), "cumulative number of bytes read", nil, nil), + fmt.Sprintf("%s_%s_read_bytes", internal.NamespaceStorage, internal.SubsystemBadger), "cumulative number of bytes read", nil, nil), "badger_written_bytes": prometheus.NewDesc( - fmt.Sprintf("%s_%s_written_bytes", namespaceStorage, subsystemBadger), "cumulative number of bytes written", nil, nil), + fmt.Sprintf("%s_%s_written_bytes", internal.NamespaceStorage, internal.SubsystemBadger), "cumulative number of bytes written", nil, nil), "badger_gets_total": prometheus.NewDesc( - fmt.Sprintf("%s_%s_gets_total", namespaceStorage, subsystemBadger), "number of gets", nil, nil), + fmt.Sprintf("%s_%s_gets_total", internal.NamespaceStorage, internal.SubsystemBadger), "number of gets", nil, nil), "badger_memtable_gets_total": prometheus.NewDesc( - fmt.Sprintf("%s_%s_memtable_gets_total", namespaceStorage, subsystemBadger), "number of memtable gets", nil, nil), + fmt.Sprintf("%s_%s_memtable_gets_total", internal.NamespaceStorage, internal.SubsystemBadger), "number of memtable gets", nil, nil), "badger_puts_total": prometheus.NewDesc( - fmt.Sprintf("%s_%s_puts_total", namespaceStorage, subsystemBadger), "number of puts", nil, nil), + fmt.Sprintf("%s_%s_puts_total", internal.NamespaceStorage, internal.SubsystemBadger), "number of puts", nil, nil), // NOTE: variable exists, but not used in badger yet //"badger_blocked_puts_total": prometheus.NewDesc( // fmt.Sprintf("%s_%s_blocked_puts_total", namespaceStorage, subsystemBadger), "number of blocked puts", nil, nil), "badger_pending_writes_total": prometheus.NewDesc( - fmt.Sprintf("%s_%s_badger_pending_writes_total", namespaceStorage, subsystemBadger), "tracks the number of pending writes", []string{"path"}, nil), + fmt.Sprintf("%s_%s_badger_pending_writes_total", internal.NamespaceStorage, internal.SubsystemBadger), "tracks the number of pending writes", []string{"path"}, nil), "badger_lsm_bloom_hits_total": prometheus.NewDesc( - 
fmt.Sprintf("%s_%s_lsm_bloom_hits_total", namespaceStorage, subsystemBadger), "number of LSM bloom hits", []string{"level"}, nil), + fmt.Sprintf("%s_%s_lsm_bloom_hits_total", internal.NamespaceStorage, internal.SubsystemBadger), "number of LSM bloom hits", []string{"level"}, nil), "badger_lsm_level_gets_total": prometheus.NewDesc( - fmt.Sprintf("%s_%s_lsm_level_gets_total", namespaceStorage, subsystemBadger), "number of LSM gets", []string{"level"}, nil), + fmt.Sprintf("%s_%s_lsm_level_gets_total", internal.NamespaceStorage, internal.SubsystemBadger), "number of LSM gets", []string{"level"}, nil), "badger_lsm_size_bytes": prometheus.NewDesc( - fmt.Sprintf("%s_%s_lsm_size_bytes", namespaceStorage, subsystemBadger), "size of the LSM in bytes", []string{"path"}, nil), + fmt.Sprintf("%s_%s_lsm_size_bytes", internal.NamespaceStorage, internal.SubsystemBadger), "size of the LSM in bytes", []string{"path"}, nil), "badger_vlog_size_bytes": prometheus.NewDesc( - fmt.Sprintf("%s_%s_vlog_size_bytes", namespaceStorage, subsystemBadger), "size of the value log in bytes", []string{"path"}, nil), + fmt.Sprintf("%s_%s_vlog_size_bytes", internal.NamespaceStorage, internal.SubsystemBadger), "size of the value log in bytes", []string{"path"}, nil), }) err := prometheus.Register(expvarCol) diff --git a/module/metrics/bitswap.go b/module/metrics/bitswap.go index d279e9f7292..4b73442416c 100644 --- a/module/metrics/bitswap.go +++ b/module/metrics/bitswap.go @@ -3,6 +3,8 @@ package metrics import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module/metrics/internal" ) type BitswapCollector struct { @@ -21,56 +23,56 @@ func NewBitswapCollector() *BitswapCollector { bc := &BitswapCollector{ peers: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "num_peers", - Namespace: namespaceNetwork, - Subsystem: subsystemBitswap, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemBitswap, Help: "the number of connected peers", }, []string{"prefix"}), wantlist: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "wantlist_size", - Namespace: namespaceNetwork, - Subsystem: subsystemBitswap, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemBitswap, Help: "the wantlist size", }, []string{"prefix"}), blobsReceived: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "blobs_received", - Namespace: namespaceNetwork, - Subsystem: subsystemBitswap, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemBitswap, Help: "the number of received blobs", }, []string{"prefix"}), dataReceived: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "data_received", - Namespace: namespaceNetwork, - Subsystem: subsystemBitswap, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemBitswap, Help: "the amount of data received", }, []string{"prefix"}), blobsSent: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "blobs_sent", - Namespace: namespaceNetwork, - Subsystem: subsystemBitswap, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemBitswap, Help: "the number of sent blobs", }, []string{"prefix"}), dataSent: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "data_sent", - Namespace: namespaceNetwork, - Subsystem: subsystemBitswap, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemBitswap, Help: "the amount of data sent", }, []string{"prefix"}), dupBlobsReceived: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "dup_blobs_received", - Namespace: 
namespaceNetwork, - Subsystem: subsystemBitswap, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemBitswap, Help: "the number of duplicate blobs received", }, []string{"prefix"}), dupDataReceived: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "dup_data_received", - Namespace: namespaceNetwork, - Subsystem: subsystemBitswap, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemBitswap, Help: "the amount of duplicate data received", }, []string{"prefix"}), messagesReceived: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "messages_received", - Namespace: namespaceNetwork, - Subsystem: subsystemBitswap, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemBitswap, Help: "the number of messages received", }, []string{"prefix"}), } diff --git a/module/metrics/cache.go b/module/metrics/cache.go index 3337d683a24..b432e5d7bff 100644 --- a/module/metrics/cache.go +++ b/module/metrics/cache.go @@ -5,6 +5,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics/internal" ) type CacheCollector struct { @@ -20,32 +21,32 @@ func NewCacheCollector(chain flow.ChainID) *CacheCollector { entries: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "entries_total", - Namespace: namespaceStorage, - Subsystem: subsystemCache, + Namespace: internal.NamespaceStorage, + Subsystem: internal.SubsystemCache, Help: "the number of entries in the cache", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }, []string{LabelResource}), hits: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "hits_total", - Namespace: namespaceStorage, - Subsystem: subsystemCache, + Namespace: internal.NamespaceStorage, + Subsystem: internal.SubsystemCache, Help: "the number of hits for the cache", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }, []string{LabelResource}), notfounds: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "notfounds_total", - Namespace: namespaceStorage, - Subsystem: subsystemCache, + Namespace: internal.NamespaceStorage, + Subsystem: internal.SubsystemCache, Help: "the number of times the queried item was not found in either cache or database", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }, []string{LabelResource}), misses: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "misses_total", - Namespace: namespaceStorage, - Subsystem: subsystemCache, + Namespace: internal.NamespaceStorage, + Subsystem: internal.SubsystemCache, Help: "the number of times the queried item was not found in cache, but found in database", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }, []string{LabelResource}), diff --git a/module/metrics/chainsync.go b/module/metrics/chainsync.go index b58718ce81d..c785bc37806 100644 --- a/module/metrics/chainsync.go +++ b/module/metrics/chainsync.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/model/chainsync" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics/internal" ) type ChainSyncCollector struct { @@ -25,40 +26,40 @@ func NewChainSyncCollector(chainID flow.ChainID) *ChainSyncCollector { chainID: chainID, timeToPruned: prometheus.NewHistogramVec(prometheus.HistogramOpts{ Name: "time_to_pruned_seconds", - Namespace: namespaceChainsync, - Subsystem: subsystemSyncCore, + Namespace: internal.NamespaceChainsync, + Subsystem: internal.SubsystemSyncCore, Help: "the time between queueing and pruning a block in seconds", Buckets: []float64{.1, .25, 
.5, 1, 2.5, 5, 7.5, 10, 20}, }, []string{"status", "requested_by"}), timeToReceived: prometheus.NewHistogramVec(prometheus.HistogramOpts{ Name: "time_to_received_seconds", - Namespace: namespaceChainsync, - Subsystem: subsystemSyncCore, + Namespace: internal.NamespaceChainsync, + Subsystem: internal.SubsystemSyncCore, Help: "the time between queueing and receiving a block in seconds", Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 7.5, 10, 20}, }, []string{"requested_by"}), totalPruned: prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "blocks_pruned_total", - Namespace: namespaceChainsync, - Subsystem: subsystemSyncCore, + Namespace: internal.NamespaceChainsync, + Subsystem: internal.SubsystemSyncCore, Help: "the total number of blocks pruned by 'id' or 'height'", }, []string{"requested_by"}), storedBlocks: prometheus.NewGaugeVec(prometheus.GaugeOpts{ Name: "blocks_stored_total", - Namespace: namespaceChainsync, - Subsystem: subsystemSyncCore, + Namespace: internal.NamespaceChainsync, + Subsystem: internal.SubsystemSyncCore, Help: "the total number of blocks currently stored by 'id' or 'height'", }, []string{"requested_by"}), totalHeightsRequested: prometheus.NewCounter(prometheus.CounterOpts{ Name: "block_heights_requested_total", - Namespace: namespaceChainsync, - Subsystem: subsystemSyncCore, + Namespace: internal.NamespaceChainsync, + Subsystem: internal.SubsystemSyncCore, Help: "the total number of blocks requested by height, including retried requests for the same heights. Eg: a range of 1-10 would increase the counter by 10", }), totalIdsRequested: prometheus.NewCounter(prometheus.CounterOpts{ Name: "block_ids_requested_total", - Namespace: namespaceChainsync, - Subsystem: subsystemSyncCore, + Namespace: internal.NamespaceChainsync, + Subsystem: internal.SubsystemSyncCore, Help: "the total number of blocks requested by id", }), } diff --git a/module/metrics/cleaner.go b/module/metrics/cleaner.go index 57bddb9de71..ed8fc946c8c 100644 --- a/module/metrics/cleaner.go +++ b/module/metrics/cleaner.go @@ -5,6 +5,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module/metrics/internal" ) type CleanerCollector struct { @@ -14,8 +16,8 @@ type CleanerCollector struct { func NewCleanerCollector() *CleanerCollector { cc := &CleanerCollector{ gcDuration: promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceStorage, - Subsystem: subsystemBadger, + Namespace: internal.NamespaceStorage, + Subsystem: internal.SubsystemBadger, Name: "garbage_collection_runtime_s", Buckets: []float64{1, 10, 60, 60 * 5, 60 * 15}, Help: "the time spent on badger garbage collection", diff --git a/module/metrics/collection.go b/module/metrics/collection.go index 19be622f0ab..dafe665e174 100644 --- a/module/metrics/collection.go +++ b/module/metrics/collection.go @@ -7,6 +7,7 @@ import ( "github.com/onflow/flow-go/model/cluster" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) type CollectionCollector struct { @@ -23,29 +24,29 @@ func NewCollectionCollector(tracer module.Tracer) *CollectionCollector { tracer: tracer, transactionsIngested: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceCollection, + Namespace: internal.NamespaceCollection, Name: "ingested_transactions_total", Help: "count of transactions ingested by this node", }), finalizedHeight: promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: 
namespaceCollection,
-			Subsystem: subsystemProposal,
+			Namespace: internal.NamespaceCollection,
+			Subsystem: internal.SubsystemProposal,
			Name:      "finalized_height",
			Help:      "tracks the latest finalized height",
		}, []string{LabelChain}),
		proposals: promauto.NewHistogramVec(prometheus.HistogramOpts{
-			Namespace: namespaceCollection,
-			Subsystem: subsystemProposal,
+			Namespace: internal.NamespaceCollection,
+			Subsystem: internal.SubsystemProposal,
			Buckets:   []float64{1, 2, 5, 10, 20},
			Name:      "proposals_size_transactions",
			Help:      "size/number of proposed collections",
		}, []string{LabelChain}),
		guarantees: promauto.NewHistogramVec(prometheus.HistogramOpts{
-			Namespace: namespaceCollection,
-			Subsystem: subsystemProposal,
+			Namespace: internal.NamespaceCollection,
+			Subsystem: internal.SubsystemProposal,
			Buckets:   []float64{1, 2, 5, 10, 20},
			Name:      "guarantees_size_transactions",
			Help:      "size/number of guaranteed/finalized collections",
diff --git a/module/metrics/compliance.go b/module/metrics/compliance.go
index de74b79cfcf..e3027ee7543 100644
--- a/module/metrics/compliance.go
+++ b/module/metrics/compliance.go
@@ -8,6 +8,7 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics/internal"
 )
 type ComplianceCollector struct {
@@ -38,105 +39,105 @@ func NewComplianceCollector() *ComplianceCollector {
 		currentEpochCounter: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "current_epoch_counter",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the current epoch's counter",
		}),
		currentEpochPhase: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "current_epoch_phase",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the current epoch's phase",
		}),
		committedEpochFinalView: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "committed_epoch_final_view",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the final view of the committed epoch with the greatest counter",
		}),
		lastEpochTransitionHeight: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "last_epoch_transition_height",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the height of the most recent finalized epoch transition; in other words the height of the first block of the current epoch",
		}),
		currentEpochFinalView: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "current_epoch_final_view",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the final view of the current epoch",
		}),
		currentDKGPhase1FinalView: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "current_dkg_phase1_final_view",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the final view of phase 1 of the current epoch's DKG",
		}),
		currentDKGPhase2FinalView: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "current_dkg_phase2_final_view",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the final view of phase 2 of the current epoch's DKG",
		}),
		currentDKGPhase3FinalView: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "current_dkg_phase3_final_view",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the final view of phase 3 of the current epoch's DKG (a successful DKG will end shortly after this view)",
		}),
		finalizedHeight: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "finalized_height",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the last finalized height",
		}),
		sealedHeight: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "sealed_height",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the last sealed height",
		}),
		finalizedBlocks: promauto.NewCounterVec(prometheus.CounterOpts{
			Name:      "finalized_blocks_total",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the number of finalized blocks",
		}, []string{LabelProposer}),
		sealedBlocks: promauto.NewCounter(prometheus.CounterOpts{
			Name:      "sealed_blocks_total",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the number of sealed blocks",
		}),
		finalizedPayload: promauto.NewCounterVec(prometheus.CounterOpts{
			Name:      "finalized_payload_total",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the number of resources in finalized blocks",
		}, []string{LabelResource}),
		sealedPayload: promauto.NewCounterVec(prometheus.CounterOpts{
			Name:      "sealed_payload_total",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the number of resources in sealed blocks",
		}, []string{LabelResource}),
		finalizedBlocksPerSecond: promauto.NewSummary(prometheus.SummaryOpts{
			Name:      "finalized_blocks_per_second",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "the number of finalized blocks per second/the finalized block rate",
			Objectives: map[float64]float64{
				0.01: 0.001,
@@ -152,8 +153,8 @@ func NewComplianceCollector() *ComplianceCollector {
 		epochEmergencyFallbackTriggered: promauto.NewGauge(prometheus.GaugeOpts{
			Name:      "epoch_fallback_triggered",
-			Namespace: namespaceConsensus,
-			Subsystem: subsystemCompliance,
+			Namespace: internal.NamespaceConsensus,
+			Subsystem: internal.SubsystemCompliance,
			Help:      "indicates whether epoch emergency fallback is triggered; if >0, the fallback is triggered",
		}),
	}
diff --git a/module/metrics/consensus.go b/module/metrics/consensus.go
index 0c5229639fd..de3538c4941 100644
--- a/module/metrics/consensus.go
+++ b/module/metrics/consensus.go
@@ -7,6 +7,7 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
+	"github.com/onflow/flow-go/module/metrics/internal"
 )
 // ConsensusCollector ...
@@ -30,26 +31,26 @@ type ConsensusCollector struct { func NewConsensusCollector(tracer module.Tracer, registerer prometheus.Registerer) *ConsensusCollector { onReceiptDuration := prometheus.NewCounter(prometheus.CounterOpts{ Name: "push_receipts_on_receipt_duration_seconds_total", - Namespace: namespaceConsensus, - Subsystem: subsystemMatchEngine, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemMatchEngine, Help: "time spent in consensus matching engine's onReceipt method in seconds", }) onApprovalDuration := prometheus.NewCounter(prometheus.CounterOpts{ Name: "on_approval_duration_seconds_total", - Namespace: namespaceConsensus, - Subsystem: subsystemMatchEngine, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemMatchEngine, Help: "time spent in consensus matching engine's onApproval method in seconds", }) checkSealingDuration := prometheus.NewCounter(prometheus.CounterOpts{ Name: "check_sealing_duration_seconds_total", - Namespace: namespaceConsensus, - Subsystem: subsystemMatchEngine, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemMatchEngine, Help: "time spent in consensus matching engine's checkSealing method in seconds", }) emergencySealedBlocks := prometheus.NewCounter(prometheus.CounterOpts{ Name: "emergency_sealed_blocks_total", - Namespace: namespaceConsensus, - Subsystem: subsystemCompliance, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemCompliance, Help: "the number of blocks sealed in emergency mode", }) registerer.MustRegister( diff --git a/module/metrics/cruisectl.go b/module/metrics/cruisectl.go index 7d56e762d50..6a459ebc810 100644 --- a/module/metrics/cruisectl.go +++ b/module/metrics/cruisectl.go @@ -5,6 +5,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module/metrics/internal" ) // CruiseCtlMetrics captures metrics about the Block Rate Controller, which adjusts @@ -21,32 +23,32 @@ func NewCruiseCtlMetrics() *CruiseCtlMetrics { return &CruiseCtlMetrics{ proportionalErr: promauto.NewGauge(prometheus.GaugeOpts{ Name: "proportional_err_s", - Namespace: namespaceConsensus, - Subsystem: subsystemCruiseCtl, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemCruiseCtl, Help: "The current proportional error measured by the controller", }), integralErr: promauto.NewGauge(prometheus.GaugeOpts{ Name: "integral_err_s", - Namespace: namespaceConsensus, - Subsystem: subsystemCruiseCtl, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemCruiseCtl, Help: "The current integral error measured by the controller", }), derivativeErr: promauto.NewGauge(prometheus.GaugeOpts{ Name: "derivative_err_per_s", - Namespace: namespaceConsensus, - Subsystem: subsystemCruiseCtl, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemCruiseCtl, Help: "The current derivative error measured by the controller", }), targetProposalDur: promauto.NewGauge(prometheus.GaugeOpts{ Name: "target_proposal_dur_s", - Namespace: namespaceConsensus, - Subsystem: subsystemCruiseCtl, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemCruiseCtl, Help: "The current target duration from parent to child proposal", }), controllerOutput: promauto.NewGauge(prometheus.GaugeOpts{ Name: "controller_output_s", - Namespace: namespaceConsensus, - Subsystem: subsystemCruiseCtl, + Namespace: internal.NamespaceConsensus, + Subsystem: 
internal.SubsystemCruiseCtl, Help: "The most recent output of the controller; the adjustment to subtract from the baseline proposal duration", }), } diff --git a/module/metrics/engine.go b/module/metrics/engine.go index b846e75dcdf..45f7e07d0ed 100644 --- a/module/metrics/engine.go +++ b/module/metrics/engine.go @@ -2,6 +2,7 @@ package metrics import ( "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -23,36 +24,36 @@ func NewEngineCollector() *EngineCollector { sent: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "messages_sent_total", - Namespace: namespaceNetwork, - Subsystem: subsystemEngine, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemEngine, Help: "the number of messages sent by engines", }, []string{EngineLabel, LabelMessage}), received: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "messages_received_total", - Namespace: namespaceNetwork, - Subsystem: subsystemEngine, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemEngine, Help: "the number of messages received by engines", }, []string{EngineLabel, LabelMessage}), handled: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "messages_handled_total", - Namespace: namespaceNetwork, - Subsystem: subsystemEngine, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemEngine, Help: "the number of messages handled by engines", }, []string{EngineLabel, LabelMessage}), inboundDropped: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "inbound_messages_dropped_total", - Namespace: namespaceNetwork, - Subsystem: subsystemEngine, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemEngine, Help: "the number of inbound messages dropped by engines", }, []string{EngineLabel, LabelMessage}), outboundDropped: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "outbound_messages_dropped_total", - Namespace: namespaceNetwork, - Subsystem: subsystemEngine, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemEngine, Help: "the number of outbound messages dropped by engines", }, []string{EngineLabel, LabelMessage}), } diff --git a/module/metrics/execution.go b/module/metrics/execution.go index 8d7b155791e..969e12e1d06 100644 --- a/module/metrics/execution.go +++ b/module/metrics/execution.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) type ExecutionCollector struct { @@ -87,436 +88,436 @@ type ExecutionCollector struct { func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { forestApproxMemorySize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "forest_approx_memory_size", Help: "an approximate size of in-memory forest in bytes", }) forestNumberOfTrees := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "forest_number_of_trees", Help: "the number of trees in memory", }) latestTrieRegCount := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: 
"latest_trie_reg_count", Help: "the number of allocated registers (latest created trie)", }) latestTrieRegCountDiff := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "latest_trie_reg_count_diff", Help: "the difference between number of unique register allocated of the latest created trie and parent trie", }) latestTrieRegSize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "latest_trie_reg_size", Help: "the size of allocated registers (latest created trie)", }) latestTrieRegSizeDiff := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "latest_trie_reg_size_diff", Help: "the difference between size of unique register allocated of the latest created trie and parent trie", }) latestTrieMaxDepthTouched := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "latest_trie_max_depth_touched", Help: "the maximum depth touched of the latest created trie", }) updatedCount := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "updates_counted", Help: "the number of updates", }) proofSize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "average_proof_size", Help: "the average size of a single generated proof in bytes", }) updatedValuesNumber := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "update_values_number", Help: "the total number of values updated", }) updatedValuesSize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "update_values_size", Help: "the total size of values for single update in bytes", }) updatedDuration := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "update_duration", Help: "the duration of update operation", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, }) updatedDurationPerValue := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "update_duration_per_value", Help: "the duration of update operation per value", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, }) readValuesNumber := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "read_values_number", Help: "the total number of values read", }) readValuesSize := 
promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "read_values_size", Help: "the total size of values for single read in bytes", }) readDuration := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "read_duration", Help: "the duration of read operation", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, }) readDurationPerValue := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemMTrie, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemMTrie, Name: "read_duration_per_value", Help: "the duration of read operation per value", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, }) blockExecutionTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "block_execution_time_milliseconds", Help: "the total time spent on block execution in milliseconds", Buckets: []float64{100, 500, 1000, 1500, 2000, 2500, 3000, 6000}, }) blockComputationUsed := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "block_computation_used", Help: "the total amount of computation used by a block", Buckets: []float64{1000, 10000, 100000, 500000, 1000000, 5000000, 10000000}, }) blockMemoryUsed := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "block_memory_used", Help: "the total amount of memory (cadence estimate) used by a block", Buckets: []float64{100_000_000, 1_000_000_000, 5_000_000_000, 10_000_000_000, 50_000_000_000, 100_000_000_000, 500_000_000_000, 1_000_000_000_000, 5_000_000_000_000, 10_000_000_000_000}, }) blockEventCounts := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "block_event_counts", Help: "the total number of events emitted during a block execution", Buckets: []float64{10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000}, }) blockEventSize := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "block_event_size", Help: "the total number of bytes used by events emitted during a block execution", Buckets: []float64{1_000, 10_000, 100_000, 500_000, 1_000_000, 5_000_000, 10_000_000, 50_000_000, 100_000_000, 500_000_000}, }) blockComputationVector := promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "block_execution_effort_vector", Help: "execution effort vector of the last executed block by computation kind", }, []string{LabelComputationKind}) blockCachedPrograms := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + 
Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "block_execution_cached_programs", Help: "Number of cached programs at the end of block execution", }) blockTransactionCounts := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "block_transaction_counts", Help: "the total number of transactions per block", Buckets: prometheus.ExponentialBuckets(4, 2, 10), }) blockCollectionCounts := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "block_collection_counts", Help: "the total number of collections per block", Buckets: prometheus.ExponentialBuckets(1, 2, 8), }) collectionExecutionTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "collection_execution_time_milliseconds", Help: "the total time spent on collection execution in milliseconds", Buckets: []float64{100, 200, 500, 1000, 1500, 2000}, }) collectionComputationUsed := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "collection_computation_used", Help: "the total amount of computation used by a collection", Buckets: []float64{1000, 10000, 50000, 100000, 500000, 1000000}, }) collectionMemoryUsed := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "collection_memory_used", Help: "the total amount of memory used (cadence estimate) by a collection", Buckets: []float64{10_000_000, 100_000_000, 1_000_000_000, 5_000_000_000, 10_000_000_000, 50_000_000_000, 100_000_000_000, 500_000_000_000, 1_000_000_000_000, 5_000_000_000_000}, }) collectionEventSize := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "collection_event_size", Help: "the total byte size used by all events generated during a collection execution", Buckets: []float64{100, 1000, 10000, 100000, 10000000, 100000000, 1000000000}, }) collectionEventCounts := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "collection_event_counts", Help: "the total number of events emitted per collection", Buckets: prometheus.ExponentialBuckets(4, 2, 8), }) collectionNumberOfRegistersTouched := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "collection_number_of_registers_touched", Help: "the total number of registers touched during collection execution", Buckets: prometheus.ExponentialBuckets(10, 2, 12), }) collectionTotalBytesWrittenToRegisters := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: 
internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "collection_total_number_of_bytes_written_to_registers", Help: "the total number of bytes written to registers during collection execution", Buckets: prometheus.ExponentialBuckets(1000, 2, 16), }) collectionTransactionCounts := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "collection_transaction_counts", Help: "the total number of transactions per collection", Buckets: prometheus.ExponentialBuckets(4, 2, 8), }) collectionRequestsSent := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemIngestion, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemIngestion, Name: "collection_requests_sent", Help: "the number of collection requests sent", }) collectionRequestsRetries := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemIngestion, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemIngestion, Name: "collection_requests_retries", Help: "the number of collection requests retried", }) transactionParseTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "transaction_parse_time_nanoseconds", Help: "the parse time for a transaction in nanoseconds", Buckets: prometheus.ExponentialBuckets(10, 10, 8), }) transactionCheckTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "transaction_check_time_nanoseconds", Help: "the checking time for a transaction in nanoseconds", Buckets: prometheus.ExponentialBuckets(10, 10, 8), }) transactionInterpretTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "transaction_interpret_time_nanoseconds", Help: "the interpretation time for a transaction in nanoseconds", Buckets: prometheus.ExponentialBuckets(10, 10, 8), }) transactionExecutionTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "transaction_execution_time_milliseconds", Help: "the total time spent on transaction execution in milliseconds", Buckets: prometheus.ExponentialBuckets(2, 2, 10), }) transactionConflictRetries := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceExecution, - Subsystem: subsystemRuntime, + Namespace: internal.NamespaceExecution, + Subsystem: internal.SubsystemRuntime, Name: "transaction_conflict_retries", Help: "the number of conflict retries needed to successfully commit a transaction. 
If retry count is high, consider reducing concurrency",
		Buckets: []float64{0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100},
	})

	transactionComputationUsed := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemRuntime,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemRuntime,
		Name:      "transaction_computation_used",
		Help:      "the total amount of computation used by a transaction",
		Buckets:   []float64{50, 100, 500, 1000, 5000, 10000},
	})

	transactionMemoryEstimate := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemRuntime,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemRuntime,
		Name:      "transaction_memory_estimate",
		Help:      "the estimated memory used by a transaction",
		Buckets:   []float64{1_000_000, 10_000_000, 100_000_000, 1_000_000_000, 5_000_000_000, 10_000_000_000, 50_000_000_000, 100_000_000_000},
	})

	transactionEmittedEvents := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemRuntime,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemRuntime,
		Name:      "transaction_emitted_events",
		Help:      "the total number of events emitted by a transaction",
		Buckets:   prometheus.ExponentialBuckets(2, 2, 10),
	})

	transactionEventSize := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemRuntime,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemRuntime,
		Name:      "transaction_event_size",
		Help:      "the total number of bytes used by events emitted during a transaction execution",
		Buckets:   prometheus.ExponentialBuckets(100, 2, 12),
	})

	scriptExecutionTime := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemRuntime,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemRuntime,
		Name:      "script_execution_time_milliseconds",
		Help:      "the total time spent on script execution in milliseconds",
		Buckets:   []float64{2, 4, 8, 16, 32, 64, 100, 250, 500},
	})

	scriptComputationUsed := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemRuntime,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemRuntime,
		Name:      "script_computation_used",
		Help:      "the total amount of computation used by a script",
		Buckets:   []float64{50, 100, 500, 1000, 5000, 10000},
	})

	scriptMemoryUsage := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemRuntime,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemRuntime,
		Name:      "script_memory_usage",
		Help:      "the total amount of memory allocated by a script",
		Buckets:   []float64{100_000, 1_000_000, 10_000_000, 50_000_000, 100_000_000, 500_000_000, 1_000_000_000},
	})

	scriptMemoryEstimate := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemRuntime,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemRuntime,
		Name:      "script_memory_estimate",
		Help:      "the estimated memory used by a script",
		Buckets:   []float64{1_000_000, 10_000_000, 100_000_000, 1_000_000_000, 5_000_000_000, 10_000_000_000, 50_000_000_000, 100_000_000_000},
	})

	scriptMemoryDifference := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemRuntime,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemRuntime,
		Name:      "script_memory_difference",
		Help:      "the difference in actual memory usage and estimate for a script",
		Buckets:   []float64{-1, 0, 10_000_000, 100_000_000, 1_000_000_000},
	})

	chunkDataPackRequestProcessedTotal := promauto.NewCounter(prometheus.CounterOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemProvider,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemProvider,
		Name:      "chunk_data_packs_requested_total",
		Help:      "the total number of chunk data pack requests processed by provider engine",
	})

	chunkDataPackProofSize := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemIngestion,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemIngestion,
		Name:      "chunk_data_pack_proof_size",
		Help:      "the total number of bytes used for storing the proof part of a chunk data pack",
		Buckets:   prometheus.ExponentialBuckets(1000, 2, 16),
	})

	chunkDataPackCollectionSize := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemIngestion,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemIngestion,
		Name:      "chunk_data_pack_collection_size",
		Help:      "the total number of transactions in the collection",
		Buckets:   prometheus.ExponentialBuckets(1, 2, 10),
	})

	blockDataUploadsInProgress := promauto.NewGauge(prometheus.GaugeOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemBlockDataUploader,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemBlockDataUploader,
		Name:      "block_data_upload_in_progress",
		Help:      "number of concurrently running Block Data upload operations",
	})

	blockDataUploadsDuration := promauto.NewHistogram(prometheus.HistogramOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemBlockDataUploader,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemBlockDataUploader,
		Name:      "block_data_upload_duration_ms",
		Help:      "the duration of update upload operation",
		Buckets:   []float64{1, 100, 500, 1000, 2000},
	})

	computationResultUploadedCount := promauto.NewCounter(prometheus.CounterOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemProvider,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemProvider,
		Name:      "computation_result_uploaded_count",
		Help:      "the total count of computation result uploaded",
	})

	computationResultUploadRetriedCount := promauto.NewCounter(prometheus.CounterOpts{
-		Namespace: namespaceExecution,
-		Subsystem: subsystemProvider,
+		Namespace: internal.NamespaceExecution,
+		Subsystem: internal.SubsystemProvider,
		Name:      "computation_result_upload_retried_count",
		Help:      "the total count of computation result upload retried",
	})
@@ -582,93 +583,93 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector {
 		computationResultUploadedCount:      computationResultUploadedCount,
		computationResultUploadRetriedCount: computationResultUploadRetriedCount,
		totalExecutedBlocksCounter: promauto.NewCounter(prometheus.CounterOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemRuntime,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemRuntime,
			Name:      "total_executed_blocks",
			Help:      "the total number of blocks that have been executed",
		}),
		totalExecutedCollectionsCounter: promauto.NewCounter(prometheus.CounterOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemRuntime,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemRuntime,
			Name:      "total_executed_collections",
			Help:      "the total number of collections that have been executed",
		}),
		totalExecutedTransactionsCounter: promauto.NewCounter(prometheus.CounterOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemRuntime,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemRuntime,
			Name:      "total_executed_transactions",
			Help:      "the total number of transactions that have been executed",
		}),
		totalFailedTransactionsCounter: promauto.NewCounter(prometheus.CounterOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemRuntime,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemRuntime,
			Name:      "total_failed_transactions",
			Help:      "the total number of transactions that have failed when executed",
		}),
		totalExecutedScriptsCounter: promauto.NewCounter(prometheus.CounterOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemRuntime,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemRuntime,
			Name:      "total_executed_scripts",
			Help:      "the total number of scripts that have been executed",
		}),
		lastExecutedBlockHeightGauge: promauto.NewGauge(prometheus.GaugeOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemRuntime,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemRuntime,
			Name:      "last_executed_block_height",
			Help:      "the last height that was executed",
		}),
		stateStorageDiskTotal: promauto.NewGauge(prometheus.GaugeOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemStateStorage,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemStateStorage,
			Name:      "data_size_bytes",
			Help:      "the execution state size on disk in bytes",
		}),
		storageStateCommitment: promauto.NewGauge(prometheus.GaugeOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemStateStorage,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemStateStorage,
			Name:      "commitment_size_bytes",
			Help:      "the storage size of a state commitment in bytes",
		}),
		stateSyncActive: promauto.NewGauge(prometheus.GaugeOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemIngestion,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemIngestion,
			Name:      "state_sync_active",
			Help:      "indicates if the state sync is active",
		}),
		numberOfAccounts: promauto.NewGauge(prometheus.GaugeOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemRuntime,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemRuntime,
			Name:      "number_of_accounts",
			Help:      "the number of existing accounts on the network",
		}),
		programsCacheMiss: promauto.NewCounter(prometheus.CounterOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemRuntime,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemRuntime,
			Name:      "programs_cache_miss",
			Help:      "the number of times a program was not found in the cache and had to be loaded",
		}),
		programsCacheHit: promauto.NewCounter(prometheus.CounterOpts{
-			Namespace: namespaceExecution,
-			Subsystem: subsystemRuntime,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemRuntime,
			Name:      "programs_cache_hit",
			Help:      "the number of times a program was found in the cache",
		}),
		maxCollectionHeight: prometheus.NewGauge(prometheus.GaugeOpts{
			Name:      "max_collection_height",
-			Namespace: namespaceExecution,
-			Subsystem: subsystemIngestion,
+			Namespace: internal.NamespaceExecution,
+			Subsystem: internal.SubsystemIngestion,
			Help:      "gauge to track the maximum block height of collections received",
		}),
	}
diff --git a/module/metrics/execution_data_requester.go b/module/metrics/execution_data_requester.go
index
e8ccc5e3266..1d159e9a31a 100644 --- a/module/metrics/execution_data_requester.go +++ b/module/metrics/execution_data_requester.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) type ExecutionDataRequesterCollector struct { @@ -25,51 +26,51 @@ type ExecutionDataRequesterCollector struct { func NewExecutionDataRequesterCollector() module.ExecutionDataRequesterMetrics { fetchDuration := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceStateSync, - Subsystem: subsystemExecutionDataRequester, + Namespace: internal.NamespaceStateSync, + Subsystem: internal.SubsystemExecutionDataRequester, Name: "execution_requester_download_duration_ms", Help: "the duration of execution data download operation", Buckets: []float64{1, 100, 500, 1000, 2000, 5000}, }) downloadsInProgress := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceStateSync, - Subsystem: subsystemExecutionDataRequester, + Namespace: internal.NamespaceStateSync, + Subsystem: internal.SubsystemExecutionDataRequester, Name: "execution_requester_in_progress_downloads", Help: "number of concurrently running execution data download operations", }) outstandingNotifications := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceStateSync, - Subsystem: subsystemExecutionDataRequester, + Namespace: internal.NamespaceStateSync, + Subsystem: internal.SubsystemExecutionDataRequester, Name: "execution_requester_outstanding_notifications", Help: "number of execution data received notifications waiting to be processed", }) highestDownloadHeight := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceStateSync, - Subsystem: subsystemExecutionDataRequester, + Namespace: internal.NamespaceStateSync, + Subsystem: internal.SubsystemExecutionDataRequester, Name: "execution_requester_highest_download_height", Help: "highest block height for which execution data has been received", }) highestNotificationHeight := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: namespaceStateSync, - Subsystem: subsystemExecutionDataRequester, + Namespace: internal.NamespaceStateSync, + Subsystem: internal.SubsystemExecutionDataRequester, Name: "execution_requester_highest_notification_height", Help: "highest block height for which execution data notifications have been sent", }) downloadRetries := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceStateSync, - Subsystem: subsystemExecutionDataRequester, + Namespace: internal.NamespaceStateSync, + Subsystem: internal.SubsystemExecutionDataRequester, Name: "execution_requester_download_retries_total", Help: "number of execution data download retries", }) failedDownloads := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceStateSync, - Subsystem: subsystemExecutionDataRequester, + Namespace: internal.NamespaceStateSync, + Subsystem: internal.SubsystemExecutionDataRequester, Name: "execution_data_failed_downloads_total", Help: "number of failed execution data downloads", }) diff --git a/module/metrics/execution_data_sync.go b/module/metrics/execution_data_sync.go index 44a49f357fb..0ce08dd6e19 100644 --- a/module/metrics/execution_data_sync.go +++ b/module/metrics/execution_data_sync.go @@ -6,6 +6,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module/metrics/internal" ) type ExecutionDataRequesterV2Collector struct { @@ 
-24,20 +26,20 @@ func NewExecutionDataRequesterV2Collector() *ExecutionDataRequesterV2Collector { return &ExecutionDataRequesterV2Collector{ fulfilledHeight: promauto.NewGauge(prometheus.GaugeOpts{ Name: "fulfilled_height", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataRequester, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataRequester, Help: "the latest sealed height for which all execution data has been retrieved and stored locally", }), receiptsSkipped: promauto.NewCounter(prometheus.CounterOpts{ Name: "receipts_skipped", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataRequester, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataRequester, Help: "the number of skipped receipts", }), requestDurations: promauto.NewSummary(prometheus.SummaryOpts{ Name: "request_durations_ms", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataRequester, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataRequester, Help: "the durations of requests in milliseconds", Objectives: map[float64]float64{ 0.01: 0.001, @@ -51,14 +53,14 @@ func NewExecutionDataRequesterV2Collector() *ExecutionDataRequesterV2Collector { }), latestSuccessfulRequestHeight: promauto.NewGauge(prometheus.GaugeOpts{ Name: "latest_successful_request_height", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataRequester, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataRequester, Help: "the block height of the latest successful request", }), executionDataSizes: promauto.NewSummary(prometheus.SummaryOpts{ Name: "execution_data_sizes_bytes", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataRequester, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataRequester, Help: "the sizes of Block Execution Data in bytes", Objectives: map[float64]float64{ 0.01: 0.001, @@ -72,27 +74,27 @@ func NewExecutionDataRequesterV2Collector() *ExecutionDataRequesterV2Collector { }), requestAttempts: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "request_attempts", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataRequester, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataRequester, Buckets: []float64{1, 2, 3, 4, 5}, Help: "the number of attempts before a request succeeded", }), requestsFailed: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "requests_failed", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataRequester, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataRequester, Help: "the number of failed requests", }, []string{ExecutionDataRequestRetryable}), requestsCancelled: promauto.NewCounter(prometheus.CounterOpts{ Name: "requests_cancelled", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataRequester, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataRequester, Help: "the number of cancelled requests", }), responsesDropped: promauto.NewCounter(prometheus.CounterOpts{ Name: "responses_dropped", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataRequester, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataRequester, Help: "the number of dropped responses", }), } @@ -138,8 +140,8 @@ func NewExecutionDataProviderCollector() 
*ExecutionDataProviderCollector { return &ExecutionDataProviderCollector{ computeRootIDDurations: promauto.NewSummary(prometheus.SummaryOpts{ Name: "compute_root_id_durations_ms", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataProvider, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataProvider, Help: "the durations of computing root IDs in milliseconds", Objectives: map[float64]float64{ 0.01: 0.001, @@ -153,15 +155,15 @@ func NewExecutionDataProviderCollector() *ExecutionDataProviderCollector { }), numberOfChunks: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "number_of_chunks", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataProvider, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataProvider, Buckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}, Help: "the number of chunks in a Block Execution Data", }), addBlobsDurations: promauto.NewSummary(prometheus.SummaryOpts{ Name: "add_blobs_durations_ms", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataProvider, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataProvider, Help: "the durations of adding blobs in milliseconds", Objectives: map[float64]float64{ 0.01: 0.001, @@ -175,8 +177,8 @@ func NewExecutionDataProviderCollector() *ExecutionDataProviderCollector { }), executionDataSizes: promauto.NewSummary(prometheus.SummaryOpts{ Name: "execution_data_sizes_bytes", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataProvider, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataProvider, Help: "the sizes of Block Execution Data in bytes", Objectives: map[float64]float64{ 0.01: 0.001, @@ -190,8 +192,8 @@ func NewExecutionDataProviderCollector() *ExecutionDataProviderCollector { }), addBlobsFailed: promauto.NewCounter(prometheus.CounterOpts{ Name: "add_blobs_failed", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataProvider, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataProvider, Help: "the number of failed attempts to add blobs", }), } @@ -220,8 +222,8 @@ func NewExecutionDataPrunerCollector() *ExecutionDataPrunerCollector { return &ExecutionDataPrunerCollector{ pruneDurations: promauto.NewSummary(prometheus.SummaryOpts{ Name: "prune_durations_ms", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataPruner, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataPruner, Help: "the durations of pruning in milliseconds", Objectives: map[float64]float64{ 0.01: 0.001, @@ -235,8 +237,8 @@ func NewExecutionDataPrunerCollector() *ExecutionDataPrunerCollector { }), latestHeightPruned: promauto.NewGauge(prometheus.GaugeOpts{ Name: "latest_height_pruned", - Namespace: namespaceExecutionDataSync, - Subsystem: subsystemExeDataPruner, + Namespace: internal.NamespaceExecutionDataSync, + Subsystem: internal.SubsystemExeDataPruner, Help: "the latest height pruned", }), } diff --git a/module/metrics/gossipsub.go b/module/metrics/gossipsub.go index 5ba5369fa0d..26e8ee9936e 100644 --- a/module/metrics/gossipsub.go +++ b/module/metrics/gossipsub.go @@ -5,6 +5,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) type GossipSubMetrics struct { @@ -27,8 
+28,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.receivedIHaveCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gs.prefix + "gossipsub_received_ihave_total", Help: "number of received ihave messages from gossipsub protocol", }, @@ -36,8 +37,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.receivedIWantCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gs.prefix + "gossipsub_received_iwant_total", Help: "number of received iwant messages from gossipsub protocol", }, @@ -45,8 +46,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.receivedGraftCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gs.prefix + "gossipsub_received_graft_total", Help: "number of received graft messages from gossipsub protocol", }, @@ -54,8 +55,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.receivedPruneCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gs.prefix + "gossipsub_received_prune_total", Help: "number of received prune messages from gossipsub protocol", }, @@ -63,8 +64,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.incomingRpcAcceptedFullyCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gs.prefix + "gossipsub_incoming_rpc_accepted_fully_total", Help: "number of incoming rpc messages accepted fully by gossipsub protocol", }, @@ -72,8 +73,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.incomingRpcAcceptedOnlyControlCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gs.prefix + "gossipsub_incoming_rpc_accepted_only_control_total", Help: "number of incoming rpc messages from which only the control messages were accepted by gossipsub protocol", }, @@ -81,8 +82,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.incomingRpcRejectedCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gs.prefix + "gossipsub_incoming_rpc_rejected_total", Help: "number of incoming rpc messages rejected by gossipsub protocol", }, @@ -90,8 +91,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.receivedPublishMessageCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gs.prefix + "gossipsub_received_publish_message_total", Help: "number of received publish messages from gossipsub protocol", }, @@ -161,8 +162,8 @@ func NewGossipSubLocalMeshMetrics(prefix string) *GossipSubLocalMeshMetrics { return &GossipSubLocalMeshMetrics{ localMeshSize:
*promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: prefix + "gossipsub_local_mesh_size", Help: "number of peers in the local mesh of the node", }, diff --git a/module/metrics/gossipsub_rpc_validation_inspector.go b/module/metrics/gossipsub_rpc_validation_inspector.go index f4d79d4121d..6823848bdca 100644 --- a/module/metrics/gossipsub_rpc_validation_inspector.go +++ b/module/metrics/gossipsub_rpc_validation_inspector.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) // GossipSubRpcValidationInspectorMetrics is the metrics collector for the gossipsub RPC validation inspector. @@ -25,16 +26,16 @@ func NewGossipSubRPCValidationInspectorMetrics(prefix string) *GossipSubRpcValid gc := &GossipSubRpcValidationInspectorMetrics{prefix: prefix} gc.rpcCtrlMsgInBlockingPreProcessingGauge = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gc.prefix + "control_message_in_blocking_preprocess_total", Help: "the number of rpc control messages currently being pre-processed", }, []string{LabelCtrlMsgType}, ) gc.rpcCtrlMsgBlockingProcessingTimeHistogram = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gc.prefix + "rpc_control_message_validator_blocking_preprocessing_time_seconds", Help: "duration [seconds; measured with float64 precision] of how long the rpc control message validator blocked pre-processing an rpc control message", Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 7.5, 10, 20}, @@ -42,16 +43,16 @@ func NewGossipSubRPCValidationInspectorMetrics(prefix string) *GossipSubRpcValid ) gc.rpcCtrlMsgInAsyncPreProcessingGauge = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gc.prefix + "control_messages_in_async_processing_total", Help: "the number of rpc control messages currently being processed asynchronously by workers from the rpc validator worker pool", }, []string{LabelCtrlMsgType}, ) gc.rpcCtrlMsgAsyncProcessingTimeHistogram = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: gc.prefix + "rpc_control_message_validator_async_processing_time_seconds", Help: "duration [seconds; measured with float64 precision] of how long it takes rpc control message validator to asynchronously process an rpc message", Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 7.5, 10, 20}, diff --git a/module/metrics/gossipsub_score.go b/module/metrics/gossipsub_score.go index 2f574cf332b..f7e1d275907 100644 --- a/module/metrics/gossipsub_score.go +++ b/module/metrics/gossipsub_score.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" "github.com/onflow/flow-go/network/channels" ) @@ -38,8 +39,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.peerScore =
promauto.NewHistogram( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: prefix + "gossipsub_overall_peer_score", Help: "overall peer score from gossipsub peer scoring", Buckets: gossipSubScoreBuckets, @@ -48,8 +49,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.appSpecificScore = promauto.NewHistogram( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: prefix + "gossipsub_app_specific_score", Help: "app specific score from gossipsub peer scoring", Buckets: gossipSubScoreBuckets, @@ -58,8 +59,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.behaviourPenalty = promauto.NewHistogram( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: prefix + "gossipsub_behaviour_penalty_score", Help: "behaviour penalty from gossipsub peer scoring", Buckets: gossipSubScoreBuckets, @@ -68,8 +69,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.ipCollocationFactor = promauto.NewHistogram( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: prefix + "gossipsub_ip_collocation_factor_score", Help: "ip collocation factor from gossipsub peer scoring", Buckets: gossipSubScoreBuckets, @@ -78,8 +79,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.timeInMesh = *promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: prefix + "gossipsub_time_in_mesh_score", Help: "time in mesh from gossipsub scoring", Buckets: gossipSubScoreBuckets, @@ -89,8 +90,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.meshMessageDelivery = *promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: prefix + "gossipsub_mesh_message_delivery_score", Help: "mesh message delivery from gossipsub peer scoring", }, @@ -99,8 +100,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.invalidMessageDelivery = *promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: prefix + "gossipsub_invalid_message_delivery_score", Help: "invalid message delivery from gossipsub peer scoring", }, @@ -109,8 +110,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.firstMessageDelivery = *promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: prefix + "gossipsub_first_message_delivery_score", Help: "first message delivery from gossipsub peer scoring", }, @@ -119,8 +120,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.warningStateGauge = promauto.NewGauge( prometheus.GaugeOpts{ - Namespace: namespaceNetwork, - Subsystem: 
subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: prefix + "gossipsub_warning_state_peers_total", Help: "number of peers in the warning state", }, diff --git a/module/metrics/herocache.go b/module/metrics/herocache.go index f82cd84bb57..7396a775bf8 100644 --- a/module/metrics/herocache.go +++ b/module/metrics/herocache.go @@ -6,7 +6,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/module/metrics/internal" ) const subsystemHeroCache = "hero_cache" @@ -64,143 +64,24 @@ func NewNoopHeroCacheMetricsFactory() HeroCacheMetricsFactory { } } -func NetworkReceiveCacheMetricsFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - r := ResourceNetworkingReceiveCache - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(namespaceNetwork, r) -} - -// DisallowListCacheMetricsFactory is the factory method for creating a new HeroCacheCollector for the disallow list cache. -// The disallow-list cache is used to keep track of peers that are disallow-listed and the reasons for it. -// Args: -// - f: the HeroCacheMetricsFactory to create the collector -// - networkingType: the networking type of the cache, i.e., whether it is used for the public or the private network -// Returns: -// - a HeroCacheMetrics for the disallow list cache -func DisallowListCacheMetricsFactory(f HeroCacheMetricsFactory, networkingType network.NetworkingType) module.HeroCacheMetrics { - r := ResourceNetworkingDisallowListCache - if networkingType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(namespaceNetwork, r) -} - -func NetworkDnsTxtCacheMetricsFactory(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(namespaceNetwork, ResourceNetworkingDnsTxtCache, registrar) -} - -func NetworkDnsIpCacheMetricsFactory(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(namespaceNetwork, ResourceNetworkingDnsIpCache, registrar) -} - func ChunkDataPackRequestQueueMetricsFactory(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(namespaceExecution, ResourceChunkDataPackRequests, registrar) + return NewHeroCacheCollector(internal.NamespaceExecution, ResourceChunkDataPackRequests, registrar) } func ReceiptRequestsQueueMetricFactory(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(namespaceExecution, ResourceReceipt, registrar) + return NewHeroCacheCollector(internal.NamespaceExecution, ResourceReceipt, registrar) } func CollectionRequestsQueueMetricFactory(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(namespaceCollection, ResourceCollection, registrar) -} - -func DisallowListNotificationQueueMetricFactory(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(namespaceNetwork, ResourceNetworkingDisallowListNotificationQueue, registrar) -} - -func ApplicationLayerSpamRecordCacheMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - r := ResourceNetworkingApplicationLayerSpamRecordCache - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - - return f(namespaceNetwork, r) -} - -func ApplicationLayerSpamRecordQueueMetricsFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) 
module.HeroCacheMetrics { - r := ResourceNetworkingApplicationLayerSpamReportQueue - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(namespaceNetwork, r) -} - -func GossipSubRPCMetricsObserverInspectorQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. - r := ResourceNetworkingRpcMetricsObserverInspectorQueue - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(namespaceNetwork, r) -} - -func GossipSubRPCInspectorQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. - r := ResourceNetworkingRpcValidationInspectorQueue - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(namespaceNetwork, r) -} - -func GossipSubRPCSentTrackerMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. - r := ResourceNetworkingRPCSentTrackerCache - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(namespaceNetwork, r) -} - -func GossipSubRPCSentTrackerQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. - r := ResourceNetworkingRPCSentTrackerQueue - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(namespaceNetwork, r) -} - -func RpcInspectorNotificationQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - r := ResourceNetworkingRpcInspectorNotificationQueue - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(namespaceNetwork, r) -} - -func GossipSubRPCInspectorClusterPrefixedCacheMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. - r := ResourceNetworkingRpcClusterPrefixReceivedCache - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(namespaceNetwork, r) + return NewHeroCacheCollector(internal.NamespaceCollection, ResourceCollection, registrar) } func CollectionNodeTransactionsCacheMetrics(registrar prometheus.Registerer, epoch uint64) *HeroCacheCollector { - return NewHeroCacheCollector(namespaceCollection, fmt.Sprintf("%s_%d", ResourceTransaction, epoch), registrar) -} - -func FollowerCacheMetrics(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(namespaceFollowerEngine, ResourceFollowerPendingBlocksCache, registrar) + return NewHeroCacheCollector(internal.NamespaceCollection, fmt.Sprintf("%s_%d", ResourceTransaction, epoch), registrar) } func AccessNodeExecutionDataCacheMetrics(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(namespaceAccess, ResourceExecutionDataCache, registrar) -} - -// PrependPublicPrefix prepends the string "public" to the given string. -// This is used to distinguish between public and private metrics. 
-// Args: -// - str: the string to prepend, example: "my_metric" -// Returns: -// - the prepended string, example: "public_my_metric" -func PrependPublicPrefix(str string) string { - return fmt.Sprintf("%s_%s", "public", str) + return NewHeroCacheCollector(internal.NamespaceAccess, ResourceExecutionDataCache, registrar) } func NewHeroCacheCollector(nameSpace string, cacheName string, registrar prometheus.Registerer) *HeroCacheCollector { diff --git a/module/metrics/hotstuff.go b/module/metrics/hotstuff.go index df843cdeaa8..9d972d96c76 100644 --- a/module/metrics/hotstuff.go +++ b/module/metrics/hotstuff.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) // HotStuff Metrics @@ -55,8 +56,8 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { busyDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ Name: "busy_duration_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff's event loop has been busy processing one event", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -64,8 +65,8 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { idleDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "idle_duration_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff's event loop has been idle without processing any event", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -73,8 +74,8 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { waitDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ Name: "wait_duration_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long an event has been waited in the HotStuff event loop queue before being processed.", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -82,56 +83,56 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { curView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "cur_view", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "the current view that the event handler has entered", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), qcView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "qc_view", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "The view of the newest known QC from HotStuff", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), tcView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "tc_view", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "The view of the newest known TC from HotStuff", ConstLabels: 
prometheus.Labels{LabelChain: chain.String()}, }), skips: promauto.NewCounter(prometheus.CounterOpts{ Name: "skips_total", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "The number of times we skipped ahead some views", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), timeouts: promauto.NewCounter(prometheus.CounterOpts{ Name: "timeouts_total", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "The number of views that this replica left due to observing a TC", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), timeoutDuration: promauto.NewGauge(prometheus.GaugeOpts{ Name: "timeout_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "The current length of the timeout", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), committeeComputationsDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "committee_computations_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff spends computing consensus committee relations", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -139,8 +140,8 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { signerComputationsDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "crypto_computations_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff spends on crypto-related operations", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -148,8 +149,8 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { validatorComputationsDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "message_validation_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff spends on message-validation", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -157,47 +158,47 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { payloadProductionDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "payload_production_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff spends on payload production", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), blockProcessingDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "block_processing_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, +
Subsystem: internal.SubsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long compliance engine processes one block", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), voteProcessingDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "vote_processing_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long VoteAggregator processes one message", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), timeoutProcessingDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "timeout_object_processing_seconds", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long TimeoutAggregator processes one message", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), timeoutCollectorsRange: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "timeout_collectors_range", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "lowest and highest views that we are maintaining TimeoutCollectors for", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }, []string{"prefix"}), numberOfActiveCollectors: promauto.NewGauge(prometheus.GaugeOpts{ Name: "active_collectors", - Namespace: namespaceConsensus, - Subsystem: subsystemHotstuff, + Namespace: internal.NamespaceConsensus, + Subsystem: internal.SubsystemHotstuff, Help: "number of active TimeoutCollectors that the TimeoutAggregator component currently maintains", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), diff --git a/module/metrics/internal/namespaces.go b/module/metrics/internal/namespaces.go new file mode 100644 index 00000000000..75ca48d0647 --- /dev/null +++ b/module/metrics/internal/namespaces.go @@ -0,0 +1,111 @@ +package internal + +// Prometheus metric namespaces +const ( + NamespaceNetwork = "network" + NamespaceStorage = "storage" + NamespaceAccess = "access" + NamespaceObserver = "observer" + NamespaceCollection = "collection" + NamespaceConsensus = "consensus" + NamespaceVerification = "verification" + NamespaceExecution = "execution" + NamespaceLoader = "loader" + NamespaceStateSync = "state_synchronization" + NamespaceExecutionDataSync = "execution_data_sync" + NamespaceChainsync = "chainsync" + NamespaceFollowerEngine = "follower" + NamespaceRestAPI = "access_rest_api" +) + +// Network subsystems represent the various layers of networking. +const ( + SubsystemLibp2p = "libp2p" + SubsystemGossip = "gossip" + SubsystemEngine = "engine" + SubsystemQueue = "queue" + SubsystemDHT = "dht" + SubsystemBitswap = "bitswap" + SubsystemAuth = "authorization" + SubsystemRateLimiting = "ratelimit" + SubsystemAlsp = "alsp" + SubsystemSecurity = "security" +) + +// Storage subsystems represent the various components of the storage layer. 
+const ( + SubsystemBadger = "badger" + SubsystemMempool = "mempool" + SubsystemCache = "cache" +) + +// Access subsystem +const ( + SubsystemTransactionTiming = "transaction_timing" + SubsystemTransactionSubmission = "transaction_submission" + SubsystemConnectionPool = "connection_pool" + SubsystemHTTP = "http" +) + +// Observer subsystem +const ( + SubsystemObserverGRPC = "observer_grpc" +) + +// Collection subsystem +const ( + SubsystemProposal = "proposal" +) + +// Consensus subsystems represent the different components of the consensus algorithm. +const ( + SubsystemCompliance = "compliance" + SubsystemHotstuff = "hotstuff" + SubsystemCruiseCtl = "cruisectl" + SubsystemMatchEngine = "match" +) + +// Execution Subsystems +const ( + SubsystemStateStorage = "state_storage" + SubsystemMTrie = "mtrie" + SubsystemIngestion = "ingestion" + SubsystemRuntime = "runtime" + SubsystemProvider = "provider" + SubsystemBlockDataUploader = "block_data_uploader" +) + +// Verification Subsystems +const ( + SubsystemAssignerEngine = "assigner" + SubsystemFetcherEngine = "fetcher" + SubsystemRequesterEngine = "requester" + SubsystemVerifierEngine = "verifier" + SubsystemBlockConsumer = "block_consumer" + SubsystemChunkConsumer = "chunk_consumer" +) + +// Execution Data Sync Subsystems +const ( + SubsystemExeDataRequester = "requester" + SubsystemExeDataProvider = "provider" + SubsystemExeDataPruner = "pruner" + SubsystemExecutionDataRequester = "execution_data_requester" + SubsystemExeDataBlobstore = "blobstore" +) + +// module/synchronization core +const ( + SubsystemSyncCore = "sync_core" +) + +// METRIC NAMING GUIDELINES +// Namespace: +// * If it's under a module, use the module name. eg: hotstuff, network, storage, mempool, interpreter, crypto +// * If it's a core metric from a node, use the node type. eg: consensus, verification, access +// Subsystem: +// * Subsystem is optional if the entire namespace is small enough to not be segmented further. +// * Within the component, describe the part or function referred to. 
+// Constant Labels: +// * node_role: [collection, consensus, execution, verification, access] +// * beta_metric: true diff --git a/module/metrics/libp2p_resource_manager.go b/module/metrics/libp2p_resource_manager.go index 4effd90d5e5..c8ae59deb13 100644 --- a/module/metrics/libp2p_resource_manager.go +++ b/module/metrics/libp2p_resource_manager.go @@ -11,8 +11,9 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/module/metrics/internal" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ) type LibP2PResourceManagerMetrics struct { @@ -48,8 +49,8 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP l := &LibP2PResourceManagerMetrics{logger: logger, prefix: prefix} l.allowConnectionCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_allow_connection_total", Help: "total number of connections allowed by the libp2p resource manager", @@ -57,8 +58,8 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP }, []string{LabelConnectionDirection, LabelConnectionUseFD}) l.blockConnectionCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_block_connection_total", Help: "total number of connections blocked by the libp2p resource manager", @@ -66,22 +67,22 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP }, []string{LabelConnectionDirection, LabelConnectionUseFD}) l.allowStreamCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_allow_stream_total", Help: "total number of streams allowed by the libp2p resource manager", }, []string{LabelConnectionDirection}) l.blockStreamCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_block_stream_total", Help: "total number of streams blocked by the libp2p resource manager", }, []string{LabelConnectionDirection}) l.allowPeerCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_allow_peer_total", Help: "total number of remote peers allowed by the libp2p resource manager to attach to their relevant incoming/outgoing streams", }) @@ -89,22 +90,22 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP // Note: this is a lower-level metric than blockProtocolPeerCount. // This metric is incremented when a peer is blocked by the libp2p resource manager on attaching as one end of a stream (on any protocol).
l.blockPeerCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_block_peer_total", Help: "total number of remote peers blocked by the libp2p resource manager from attaching to their relevant incoming/outgoing streams", }) l.allowProtocolCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_allow_protocol_total", Help: "total number of protocols allowed by the libp2p resource manager to attach to their relevant incoming/outgoing streams", }) l.blockProtocolCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_block_protocol_total", Help: "total number of protocols blocked by the libp2p resource manager from attaching to their relevant incoming/outgoing streams", }) @@ -112,22 +113,22 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP // Note: this is a higher level metric than blockPeerCount and blockProtocolCount. // This metric is incremented when a peer is already attached as one end of a stream but on a different reserved protocol. l.blockProtocolPeerCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_block_protocol_peer_total", Help: "total number of remote peers blocked by the libp2p resource manager from attaching to their relevant incoming/outgoing streams on a specific protocol", }) l.allowServiceCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_allow_service_total", Help: "total number of remote services (e.g., ping, relay) allowed by the libp2p resource manager to attach to their relevant incoming/outgoing streams", }) l.blockServiceCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_block_service_total", Help: "total number of remote services (e.g., ping, relay) blocked by the libp2p resource manager from attaching to their relevant incoming/outgoing streams", }) @@ -135,23 +136,23 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP // Note: this is a higher level metric than blockServiceCount and blockPeerCount. // This metric is incremented when a service is already attached as one end of a stream but on a different reserved protocol. 
l.blockServicePeerCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_block_service_peer_total", Help: "total number of remote services (e.g., ping, relay) blocked by the libp2p resource manager from attaching to their relevant incoming/outgoing streams on a specific peer", }) l.allowMemoryHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_allowed_memory_bytes", Help: "size of memory allocation requests allowed by the libp2p resource manager", Buckets: []float64{KiB, 10 * KiB, 100 * KiB, 500 * KiB, 1 * MiB, 10 * MiB, 100 * MiB, 500 * MiB, 1 * GiB}, }) l.blockMemoryHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemLibp2p, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemLibp2p, Name: l.prefix + "resource_manager_blocked_memory_bytes", Help: "size of memory allocation requests blocked by the libp2p resource manager", Buckets: []float64{KiB, 10 * KiB, 100 * KiB, 500 * KiB, 1 * MiB, 10 * MiB, 100 * MiB, 500 * MiB, 1 * GiB}, diff --git a/module/metrics/loader.go b/module/metrics/loader.go index 342dc99b845..da7c37cce9d 100644 --- a/module/metrics/loader.go +++ b/module/metrics/loader.go @@ -5,6 +5,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/onflow/flow-go/module/metrics/internal" ) type LoaderCollector struct { @@ -21,27 +23,27 @@ func NewLoaderCollector() *LoaderCollector { cc := &LoaderCollector{ transactionsSent: promauto.NewCounter(prometheus.CounterOpts{ Name: "transactions_sent", - Namespace: namespaceLoader, + Namespace: internal.NamespaceLoader, Help: "transactions sent by the loader", }), transactionsLost: promauto.NewCounter(prometheus.CounterOpts{ Name: "transactions_lost", - Namespace: namespaceLoader, + Namespace: internal.NamespaceLoader, Help: "transactions that took too long to return", }), tpsConfigured: promauto.NewGauge(prometheus.GaugeOpts{ Name: "transactions_per_second_configured", - Namespace: namespaceLoader, + Namespace: internal.NamespaceLoader, Help: "transactions per second that the loader should send", }), transactionsExecuted: promauto.NewCounter(prometheus.CounterOpts{ Name: "transactions_executed", - Namespace: namespaceLoader, + Namespace: internal.NamespaceLoader, Help: "transactions successfully executed by the loader", }), tteInSeconds: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "transactions_executed_in_seconds", - Namespace: namespaceLoader, + Namespace: internal.NamespaceLoader, Help: "Time To Execute histogram for transactions (in seconds)", Buckets: prometheus.ExponentialBuckets(2, 2, 8), }), diff --git a/module/metrics/mempool.go b/module/metrics/mempool.go index 7209af9be70..a1aea742c8e 100644 --- a/module/metrics/mempool.go +++ b/module/metrics/mempool.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) type MempoolCollector struct { @@ -29,8 +30,8 @@ func NewMempoolCollector(interval time.Duration) *MempoolCollector { entries: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "entries_total", - Namespace:
namespaceStorage, - Subsystem: subsystemMempool, + Namespace: internal.NamespaceStorage, + Subsystem: internal.SubsystemMempool, Help: "the number of entries in the mempool", }, []string{LabelResource}), } diff --git a/module/metrics/namespaces.go b/module/metrics/namespaces.go deleted file mode 100644 index f89f2a530ae..00000000000 --- a/module/metrics/namespaces.go +++ /dev/null @@ -1,111 +0,0 @@ -package metrics - -// Prometheus metric namespaces -const ( - namespaceNetwork = "network" - namespaceStorage = "storage" - namespaceAccess = "access" - namespaceObserver = "observer" - namespaceCollection = "collection" - namespaceConsensus = "consensus" - namespaceVerification = "verification" - namespaceExecution = "execution" - namespaceLoader = "loader" - namespaceStateSync = "state_synchronization" - namespaceExecutionDataSync = "execution_data_sync" - namespaceChainsync = "chainsync" - namespaceFollowerEngine = "follower" - namespaceRestAPI = "access_rest_api" -) - -// Network subsystems represent the various layers of networking. -const ( - subsystemLibp2p = "libp2p" - subsystemGossip = "gossip" - subsystemEngine = "engine" - subsystemQueue = "queue" - subsystemDHT = "dht" - subsystemBitswap = "bitswap" - subsystemAuth = "authorization" - subsystemRateLimiting = "ratelimit" - subsystemAlsp = "alsp" - subsystemSecurity = "security" -) - -// Storage subsystems represent the various components of the storage layer. -const ( - subsystemBadger = "badger" - subsystemMempool = "mempool" - subsystemCache = "cache" -) - -// Access subsystem -const ( - subsystemTransactionTiming = "transaction_timing" - subsystemTransactionSubmission = "transaction_submission" - subsystemConnectionPool = "connection_pool" - subsystemHTTP = "http" -) - -// Observer subsystem -const ( - subsystemObserverGRPC = "observer_grpc" -) - -// Collection subsystem -const ( - subsystemProposal = "proposal" -) - -// Consensus subsystems represent the different components of the consensus algorithm. -const ( - subsystemCompliance = "compliance" - subsystemHotstuff = "hotstuff" - subsystemCruiseCtl = "cruisectl" - subsystemMatchEngine = "match" -) - -// Execution Subsystems -const ( - subsystemStateStorage = "state_storage" - subsystemMTrie = "mtrie" - subsystemIngestion = "ingestion" - subsystemRuntime = "runtime" - subsystemProvider = "provider" - subsystemBlockDataUploader = "block_data_uploader" -) - -// Verification Subsystems -const ( - subsystemAssignerEngine = "assigner" - subsystemFetcherEngine = "fetcher" - subsystemRequesterEngine = "requester" - subsystemVerifierEngine = "verifier" - subsystemBlockConsumer = "block_consumer" - subsystemChunkConsumer = "chunk_consumer" -) - -// Execution Data Sync Subsystems -const ( - subsystemExeDataRequester = "requester" - subsystemExeDataProvider = "provider" - subsystemExeDataPruner = "pruner" - subsystemExecutionDataRequester = "execution_data_requester" - subsystemExeDataBlobstore = "blobstore" -) - -// module/synchronization core -const ( - subsystemSyncCore = "sync_core" -) - -// METRIC NAMING GUIDELINES -// Namespace: -// * If it's under a module, use the module name. eg: hotstuff, network, storage, mempool, interpreter, crypto -// * If it's a core metric from a node, use the node type. eg: consensus, verification, access -// Subsystem: -// * Subsystem is optional if the entire namespace is small enough to not be segmented further. -// * Within the component, describe the part or function referred to. 
-// Constant Labels: -// * node_role: [collection, consensus, execution, verification, access] -// * beta_metric: true diff --git a/module/metrics/network.go b/module/metrics/network.go index af9359fef21..27efce5e4c0 100644 --- a/module/metrics/network.go +++ b/module/metrics/network.go @@ -10,8 +10,9 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/module/metrics/internal" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ) const ( @@ -83,8 +84,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.outboundMessageSize = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: nc.prefix + "outbound_message_size_bytes", Help: "size of the outbound network message", Buckets: []float64{KiB, 100 * KiB, 500 * KiB, 1 * MiB, 2 * MiB, 4 * MiB}, @@ -93,8 +94,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.inboundMessageSize = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: nc.prefix + "inbound_message_size_bytes", Help: "size of the inbound network message", Buckets: []float64{KiB, 100 * KiB, 500 * KiB, 1 * MiB, 2 * MiB, 4 * MiB}, @@ -103,8 +104,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.duplicateMessagesDropped = promauto.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: nc.prefix + "duplicate_messages_dropped", Help: "number of duplicate messages dropped", }, []string{LabelChannel, LabelProtocol, LabelMessage}, @@ -112,8 +113,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.dnsLookupDuration = promauto.NewHistogram( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: nc.prefix + "dns_lookup_duration_ms", Buckets: []float64{1, 10, 100, 500, 1000, 2000}, Help: "the time spent on resolving a dns lookup (including cache hits)", @@ -122,8 +123,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.dnsCacheMissCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: nc.prefix + "dns_cache_miss_total", Help: "the number of dns lookups that miss the cache and are made through the network", }, @@ -131,8 +132,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.dnsCacheInvalidationCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: nc.prefix + "dns_cache_invalidation_total", Help: "the number of times dns cache is invalidated for an entry", }, @@ -140,8 +141,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.dnsCacheHitCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace:
namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: nc.prefix + "dns_cache_hit_total", Help: "the number of dns cache hits", }, @@ -149,8 +150,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.dnsLookupRequestDroppedCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: nc.prefix + "dns_lookup_requests_dropped_total", Help: "the number of dns lookup requests dropped", }, @@ -158,8 +159,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.queueSize = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemQueue, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemQueue, Name: nc.prefix + "message_queue_size", Help: "the number of elements in the message receive queue", }, []string{LabelPriority}, @@ -167,8 +168,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.queueDuration = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemQueue, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemQueue, Name: nc.prefix + "message_queue_duration_seconds", Help: "duration [seconds; measured with float64 precision] of how long a message spent in the queue before being delivered to an engine.", Buckets: []float64{0.01, 0.1, 0.5, 1, 2, 5}, // 10ms, 100ms, 500ms, 1s, 2s, 5s @@ -177,8 +178,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.numMessagesProcessing = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemQueue, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemQueue, Name: nc.prefix + "current_messages_processing", Help: "the number of messages currently being processed", }, []string{LabelChannel}, @@ -186,8 +187,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.numDirectMessagesSending = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: nc.prefix + "direct_messages_in_progress", Help: "the number of direct messages currently in the process of sending", }, []string{LabelChannel}, @@ -195,8 +196,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.inboundProcessTime = promauto.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemQueue, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemQueue, Name: nc.prefix + "engine_message_processing_time_seconds", Help: "duration [seconds; measured with float64 precision] of how long a queue worker was blocked while an engine processed a message", }, []string{LabelChannel}, @@ -204,8 +205,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.outboundConnectionCount = promauto.NewGauge( prometheus.GaugeOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemQueue, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemQueue, Name: nc.prefix + "outbound_connection_count", Help: "the number of outbound connections of this node", }, @@ -213,8 +214,8 @@ func NewNetworkCollector(logger
zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.inboundConnectionCount = promauto.NewGauge( prometheus.GaugeOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemQueue, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemQueue, Name: nc.prefix + "inbound_connection_count", Help: "the number of inbound connections of this node", }, @@ -223,16 +224,16 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.routingTableSize = promauto.NewGauge( prometheus.GaugeOpts{ Name: nc.prefix + "routing_table_size", - Namespace: namespaceNetwork, - Subsystem: subsystemDHT, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemDHT, Help: "the size of the DHT routing table", }, ) nc.unAuthorizedMessagesCount = promauto.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemAuth, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemAuth, Name: nc.prefix + "unauthorized_messages_count", Help: "number of messages that failed authorization validation", }, []string{LabelNodeRole, LabelMessage, LabelChannel, LabelViolationReason}, @@ -240,8 +241,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.rateLimitedUnicastMessagesCount = promauto.NewCounterVec( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemRateLimiting, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemRateLimiting, Name: nc.prefix + "rate_limited_unicast_messages_count", Help: "number of messages sent via unicast that have been rate limited", }, []string{LabelNodeRole, LabelMessage, LabelChannel, LabelRateLimitReason}, @@ -249,8 +250,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.violationReportSkippedCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemSecurity, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemSecurity, Name: nc.prefix + "slashing_violation_reports_skipped_count", Help: "number of slashing violation reports skipped by the violations consumer because the identity of the sender was not known", }, diff --git a/module/metrics/network/herocache.go b/module/metrics/network/herocache.go new file mode 100644 index 00000000000..d1c0744ea3a --- /dev/null +++ b/module/metrics/network/herocache.go @@ -0,0 +1,131 @@ +package networkmetrics + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/internal" + "github.com/onflow/flow-go/network" +) + +func NetworkReceiveCacheMetricsFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := metrics.ResourceNetworkingReceiveCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(internal.NamespaceNetwork, r) +} + +// DisallowListCacheMetricsFactory is the factory method for creating a new HeroCacheCollector for the disallow list cache. +// The disallow-list cache is used to keep track of peers that are disallow-listed and the reasons for it.
+// Args: +// - f: the HeroCacheMetricsFactory to create the collector +// - networkingType: the networking type of the cache, i.e., whether it is used for the public or the private network +// Returns: +// - a HeroCacheMetrics for the disallow list cache +func DisallowListCacheMetricsFactory(f metrics.HeroCacheMetricsFactory, networkingType network.NetworkingType) module.HeroCacheMetrics { + r := metrics.ResourceNetworkingDisallowListCache + if networkingType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(internal.NamespaceNetwork, r) +} + +func NetworkDnsTxtCacheMetricsFactory(registrar prometheus.Registerer) *metrics.HeroCacheCollector { + return metrics.NewHeroCacheCollector(internal.NamespaceNetwork, metrics.ResourceNetworkingDnsTxtCache, registrar) +} + +func NetworkDnsIpCacheMetricsFactory(registrar prometheus.Registerer) *metrics.HeroCacheCollector { + return metrics.NewHeroCacheCollector(internal.NamespaceNetwork, metrics.ResourceNetworkingDnsIpCache, registrar) +} + +func DisallowListNotificationQueueMetricFactory(registrar prometheus.Registerer) *metrics.HeroCacheCollector { + return metrics.NewHeroCacheCollector(internal.NamespaceNetwork, metrics.ResourceNetworkingDisallowListNotificationQueue, registrar) +} + +func ApplicationLayerSpamRecordCacheMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := metrics.ResourceNetworkingApplicationLayerSpamRecordCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + + return f(internal.NamespaceNetwork, r) +} + +func ApplicationLayerSpamRecordQueueMetricsFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := metrics.ResourceNetworkingApplicationLayerSpamReportQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(internal.NamespaceNetwork, r) +} + +func GossipSubRPCMetricsObserverInspectorQueueMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for the sake of backward compatibility of metric names. + r := metrics.ResourceNetworkingRpcMetricsObserverInspectorQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(internal.NamespaceNetwork, r) +} + +func GossipSubRPCInspectorQueueMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for the sake of backward compatibility of metric names. + r := metrics.ResourceNetworkingRpcValidationInspectorQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(internal.NamespaceNetwork, r) +} + +func GossipSubRPCSentTrackerMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for the sake of backward compatibility of metric names. + r := metrics.ResourceNetworkingRPCSentTrackerCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(internal.NamespaceNetwork, r) +} + +func GossipSubRPCSentTrackerQueueMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for the sake of backward compatibility of metric names.
+ r := metrics.ResourceNetworkingRPCSentTrackerQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(internal.NamespaceNetwork, r) +} + +func RpcInspectorNotificationQueueMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := metrics.ResourceNetworkingRpcInspectorNotificationQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(internal.NamespaceNetwork, r) +} + +func GossipSubRPCInspectorClusterPrefixedCacheMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for the sake of backward compatibility of metric names. + r := metrics.ResourceNetworkingRpcClusterPrefixReceivedCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(internal.NamespaceNetwork, r) +} + +func FollowerCacheMetrics(registrar prometheus.Registerer) *metrics.HeroCacheCollector { + return metrics.NewHeroCacheCollector(internal.NamespaceFollowerEngine, metrics.ResourceFollowerPendingBlocksCache, registrar) +} + +// PrependPublicPrefix prepends the string "public" to the given string. // This is used to distinguish between public and private metrics. // Args: // - str: the string to prepend, example: "my_metric" // Returns: // - the prepended string, example: "public_my_metric" +func PrependPublicPrefix(str string) string { + return fmt.Sprintf("%s_%s", "public", str) +} diff --git a/module/metrics/observer.go b/module/metrics/observer.go index 375aa66a2ac..3aa2a9d684a 100644 --- a/module/metrics/observer.go +++ b/module/metrics/observer.go @@ -4,6 +4,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "google.golang.org/grpc/codes" + + "github.com/onflow/flow-go/module/metrics/internal" ) type ObserverMetrics interface { @@ -19,8 +21,8 @@ var _ ObserverMetrics = (*ObserverCollector)(nil) func NewObserverCollector() *ObserverCollector { return &ObserverCollector{ rpcs: promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: namespaceObserver, - Subsystem: subsystemObserverGRPC, + Namespace: internal.NamespaceObserver, + Subsystem: internal.SubsystemObserverGRPC, Name: "handler_grpc_counter", Help: "tracking error/success rate of each rpc for the observer service", }, []string{"handler", "grpc_method", "grpc_code"}), diff --git a/module/metrics/ping.go b/module/metrics/ping.go index 2bc20f42c82..cfb5f5597fa 100644 --- a/module/metrics/ping.go +++ b/module/metrics/ping.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/metrics/internal" ) type PingCollector struct { @@ -19,20 +20,20 @@ func NewPingCollector() *PingCollector { pc := &PingCollector{ reachable: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "node_reachable", - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Help: "report whether a node is reachable", }, []string{LabelNodeID, LabelNodeAddress, LabelNodeRole, LabelNodeInfo}), sealedHeight: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "sealed_height", - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Help: "the last sealed height of a node", },
[]string{LabelNodeID, LabelNodeAddress, LabelNodeRole, LabelNodeInfo, LabelNodeVersion}), hotstuffCurView: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "hotstuff_curview", - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Help: "the hotstuff current view", }, []string{LabelNodeID, LabelNodeAddress, LabelNodeInfo}), } diff --git a/module/metrics/rate_limited_blockstore.go b/module/metrics/rate_limited_blockstore.go index daebafd6827..707b1b70ca9 100644 --- a/module/metrics/rate_limited_blockstore.go +++ b/module/metrics/rate_limited_blockstore.go @@ -5,6 +5,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) type RateLimitedBlockstoreCollector struct { @@ -14,8 +15,8 @@ type RateLimitedBlockstoreCollector struct { func NewRateLimitedBlockstoreCollector(prefix string) module.RateLimitedBlockstoreMetrics { return &RateLimitedBlockstoreCollector{ bytesRead: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: namespaceStateSync, - Subsystem: subsystemExeDataBlobstore, + Namespace: internal.NamespaceStateSync, + Subsystem: internal.SubsystemExeDataBlobstore, Name: prefix + "_bytes_read", Help: "number of bytes read from the blockstore", }), diff --git a/module/metrics/rest_api.go b/module/metrics/rest_api.go index e9132f243c6..f112cf22261 100644 --- a/module/metrics/rest_api.go +++ b/module/metrics/rest_api.go @@ -9,6 +9,7 @@ import ( httpmetrics "github.com/slok/go-http-metrics/metrics" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) type RestCollector struct { @@ -33,31 +34,31 @@ func NewRestCollector(urlToRouteMapper func(string) (string, error), registerer r := &RestCollector{ urlToRouteMapper: urlToRouteMapper, httpRequestDurHistogram: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespaceRestAPI, - Subsystem: subsystemHTTP, + Namespace: internal.NamespaceRestAPI, + Subsystem: internal.SubsystemHTTP, Name: "request_duration_seconds", Help: "The latency of the HTTP requests.", Buckets: prometheus.DefBuckets, }, []string{LabelService, LabelHandler, LabelMethod, LabelStatusCode}), httpResponseSizeHistogram: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: namespaceRestAPI, - Subsystem: subsystemHTTP, + Namespace: internal.NamespaceRestAPI, + Subsystem: internal.SubsystemHTTP, Name: "response_size_bytes", Help: "The size of the HTTP responses.", Buckets: prometheus.ExponentialBuckets(100, 10, 8), }, []string{LabelService, LabelHandler, LabelMethod, LabelStatusCode}), httpRequestsInflight: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespaceRestAPI, - Subsystem: subsystemHTTP, + Namespace: internal.NamespaceRestAPI, + Subsystem: internal.SubsystemHTTP, Name: "requests_inflight", Help: "The number of inflight requests being handled at the same time.", }, []string{LabelService, LabelHandler}), httpRequestsTotal: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: namespaceRestAPI, - Subsystem: subsystemHTTP, + Namespace: internal.NamespaceRestAPI, + Subsystem: internal.SubsystemHTTP, Name: "requests_total", Help: "The number of requests handled over time.", }, []string{LabelMethod, LabelHandler}), diff --git a/module/metrics/transaction.go b/module/metrics/transaction.go index 50fca53bf39..5757b6079a1 100644 --- a/module/metrics/transaction.go +++ b/module/metrics/transaction.go @@ -10,6 
+10,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool" + "github.com/onflow/flow-go/module/metrics/internal" ) type TransactionCollector struct { @@ -50,8 +51,8 @@ func NewTransactionCollector( logTimeToFinalizedExecuted: logTimeToFinalizedExecuted, timeToFinalized: promauto.NewSummary(prometheus.SummaryOpts{ Name: "time_to_finalized_seconds", - Namespace: namespaceAccess, - Subsystem: subsystemTransactionTiming, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemTransactionTiming, Help: "the duration of how long it took between the transaction was received until it was finalized", Objectives: map[float64]float64{ 0.01: 0.001, @@ -64,8 +65,8 @@ func NewTransactionCollector( }), timeToExecuted: promauto.NewSummary(prometheus.SummaryOpts{ Name: "time_to_executed_seconds", - Namespace: namespaceAccess, - Subsystem: subsystemTransactionTiming, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemTransactionTiming, Help: "the duration of how long it took between the transaction was received until it was executed", Objectives: map[float64]float64{ 0.01: 0.001, @@ -78,8 +79,8 @@ func NewTransactionCollector( }), timeToFinalizedExecuted: promauto.NewSummary(prometheus.SummaryOpts{ Name: "time_to_finalized_executed_seconds", - Namespace: namespaceAccess, - Subsystem: subsystemTransactionTiming, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemTransactionTiming, Help: "the duration of how long it took between the transaction was received until it was both " + "finalized and executed", Objectives: map[float64]float64{ @@ -93,46 +94,46 @@ func NewTransactionCollector( }), transactionSubmission: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "transaction_submission", - Namespace: namespaceAccess, - Subsystem: subsystemTransactionSubmission, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemTransactionSubmission, Help: "counter for the success/failure of transaction submissions", }, []string{"result"}), scriptExecutedDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ Name: "script_executed_duration", - Namespace: namespaceAccess, - Subsystem: subsystemTransactionSubmission, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemTransactionSubmission, Help: "histogram for the duration in ms of the round trip time for executing a script", Buckets: []float64{1, 100, 500, 1000, 2000, 5000}, }, []string{"script_size"}), scriptExecutionErrorOnExecutor: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "script_execution_error_archive", - Namespace: namespaceAccess, - Subsystem: subsystemTransactionSubmission, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemTransactionSubmission, Help: "histogram for the internal errors for executing a script for a block on the archive node", }, []string{"source"}), scriptExecutionComparison: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "script_execution_comparison", - Namespace: namespaceAccess, - Subsystem: subsystemTransactionSubmission, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemTransactionSubmission, Help: "histogram for the comparison outcomes of executing a script on the archive and execution node", }, []string{"outcome"}), transactionResultDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ Name: "transaction_result_fetched_duration", - Namespace: namespaceAccess, - Subsystem: subsystemTransactionSubmission, + 
Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemTransactionSubmission, Help: "histogram for the duration in ms of the round trip time for getting a transaction result", Buckets: []float64{1, 100, 500, 1000, 2000, 5000}, }, []string{"payload_size"}), scriptSize: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "script_size", - Namespace: namespaceAccess, - Subsystem: subsystemTransactionSubmission, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemTransactionSubmission, Help: "histogram for the script size in kb of scripts used in ExecuteScript", }), transactionSize: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "transaction_size", - Namespace: namespaceAccess, - Subsystem: subsystemTransactionSubmission, + Namespace: internal.NamespaceAccess, + Subsystem: internal.SubsystemTransactionSubmission, Help: "histogram for the transaction size in kb of scripts used in GetTransactionResult", }), } diff --git a/module/metrics/unicast_manager.go b/module/metrics/unicast_manager.go index f790996d490..2e1147ff7a5 100644 --- a/module/metrics/unicast_manager.go +++ b/module/metrics/unicast_manager.go @@ -7,6 +7,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) // UnicastManagerMetrics metrics collector for the unicast manager. @@ -34,8 +35,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { uc.createStreamRetriesDueToDialBackoff = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: uc.prefix + "attempts_to_create_stream_due_to_in_progress_dial_total", Help: "the number of times a stream creation is retried due to a dial in progress", Buckets: []float64{1, 2, 3}, @@ -44,8 +45,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { uc.createStreamTimeDueToDialBackoff = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: uc.prefix + "overall_time_to_create_stream_seconds", Help: "the amount of time it takes to create a stream successfully in seconds including the time to create a connection when needed", Buckets: []float64{0.01, 0.1, 0.5, 1, 2, 5}, @@ -54,8 +55,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { uc.dialPeerRetries = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: uc.prefix + "attempts_to_dial_peer_total", Help: "number of retry attempts before a connection is established successfully", Buckets: []float64{1, 2, 3}, @@ -64,8 +65,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { uc.dialPeerTime = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: uc.prefix + "time_to_dial_peer_seconds", Help: "the amount of time it takes to dial a peer and establish a connection during stream creation", Buckets: []float64{0.01, 0.1, 0.5, 1, 2, 5}, @@ -74,8 +75,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { uc.createStreamOnConnRetries = 
promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: uc.prefix + "attempts_to_create_stream_on_connection_total", Help: "number of retry attempts before a stream is created on the available connection between two peers", Buckets: []float64{1, 2, 3}, @@ -84,8 +85,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { uc.createStreamOnConnTime = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: namespaceNetwork, - Subsystem: subsystemGossip, + Namespace: internal.NamespaceNetwork, + Subsystem: internal.SubsystemGossip, Name: uc.prefix + "time_to_create_stream_on_connection_seconds", Help: "the amount of time it takes to create a stream on the available connection between two peers", Buckets: []float64{0.01, 0.1, 0.5, 1, 2, 5}, diff --git a/module/metrics/verification.go b/module/metrics/verification.go index b89d2bc1584..f980eb77312 100644 --- a/module/metrics/verification.go +++ b/module/metrics/verification.go @@ -4,6 +4,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/metrics/internal" ) type VerificationCollector struct { @@ -48,109 +49,109 @@ func NewVerificationCollector(tracer module.Tracer, registerer prometheus.Regist // Job Consumers lastProcessedBlockJobIndexBlockConsumer := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "last_processed_block_job_index", - Namespace: namespaceVerification, - Subsystem: subsystemBlockConsumer, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemBlockConsumer, Help: "the last block job index processed by block consumer", }) lastProcessedChunkJobIndexChunkConsumer := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "last_processed_chunk_job_index", - Namespace: namespaceVerification, - Subsystem: subsystemChunkConsumer, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemChunkConsumer, Help: "the last chunk job index processed by chunk consumer", }) // Assigner Engine receivedFinalizedHeightAssigner := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "finalized_height", - Namespace: namespaceVerification, - Subsystem: subsystemAssignerEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemAssignerEngine, Help: "the last finalized height received by assigner engine", }) receivedResultsTotalAssigner := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "received_result_total", - Namespace: namespaceVerification, - Subsystem: subsystemAssignerEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemAssignerEngine, Help: "total number of execution results received by assigner engine", }) assignedChunksTotalAssigner := prometheus.NewCounter(prometheus.CounterOpts{ Name: "chunk_assigned_total", - Namespace: namespaceVerification, - Subsystem: subsystemAssignerEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemAssignerEngine, Help: "total number of chunks assigned to verification node", }) sentChunksTotalAssigner := prometheus.NewCounter(prometheus.CounterOpts{ Name: "processed_chunk_sent_total", - Namespace: namespaceVerification, - Subsystem: subsystemAssignerEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemAssignerEngine, Help: "total number chunks sent by assigner engine to chunk consumer", }) // Fetcher Engine 
receivedAssignedChunksTotalFetcher := prometheus.NewCounter(prometheus.CounterOpts{ Name: "assigned_chunk_received_total", - Namespace: namespaceVerification, - Subsystem: subsystemFetcherEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemFetcherEngine, Help: "total number of chunks received by fetcher engine from assigner engine through chunk consumer", }) // Requester Engine receivedChunkDataPackRequestsTotalRequester := prometheus.NewCounter(prometheus.CounterOpts{ Name: "chunk_data_pack_request_received_total", - Namespace: namespaceVerification, - Subsystem: subsystemRequesterEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemRequesterEngine, Help: "total number of chunk data pack requests received by requester engine from fetcher engine", }) sentChunkDataRequestMessagesTotalRequester := prometheus.NewCounter(prometheus.CounterOpts{ Name: "chunk_data_pack_request_message_sent_total", - Namespace: namespaceVerification, - Subsystem: subsystemRequesterEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemRequesterEngine, Help: "total number of chunk data pack request messages sent in the network by requester engine", }) receivedChunkDataResponseMessagesTotalRequester := prometheus.NewCounter(prometheus.CounterOpts{ Name: "chunk_data_response_message_received_total", - Namespace: namespaceVerification, - Subsystem: subsystemRequesterEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemRequesterEngine, Help: "total number of chunk data response messages received from network by requester engine", }) sentChunkDataPackByRequesterTotal := prometheus.NewCounter(prometheus.CounterOpts{ Name: "chunk_data_pack_sent_total", - Namespace: namespaceVerification, - Subsystem: subsystemRequesterEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemRequesterEngine, Help: "total number of chunk data packs sent by requester engine to fetcher engine", }) sentVerifiableChunksTotalFetcher := prometheus.NewCounter(prometheus.CounterOpts{ Name: "verifiable_chunk_sent_total", - Namespace: namespaceVerification, - Subsystem: subsystemFetcherEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemFetcherEngine, Help: "total number of verifiable chunks sent by fetcher engine to verifier engine", }) receivedChunkDataPackTotalFetcher := prometheus.NewCounter(prometheus.CounterOpts{ Name: "chunk_data_pack_received_total", - Namespace: namespaceVerification, - Subsystem: subsystemFetcherEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemFetcherEngine, Help: "total number of chunk data packs received by fetcher engine", }) requestedChunkDataPackTotalFetcher := prometheus.NewCounter(prometheus.CounterOpts{ Name: "chunk_data_pack_requested_total", - Namespace: namespaceVerification, - Subsystem: subsystemFetcherEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemFetcherEngine, Help: "total number of chunk data packs requested by fetcher engine", }) maxChunkDataPackRequestAttemptForNextUnsealedHeight := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "next_unsealed_height_max_chunk_data_pack_request_attempt_times", - Namespace: namespaceVerification, - Subsystem: subsystemRequesterEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemRequesterEngine, // an indicator for when execution nodes is unresponsive to chunk data pack requests, // in which 
case verification node will keep requesting the chunk data pack, and this // metrics number will go up. @@ -161,15 +162,15 @@ func NewVerificationCollector(tracer module.Tracer, registerer prometheus.Regist // Verifier Engine receivedVerifiableChunksTotalVerifier := prometheus.NewCounter(prometheus.CounterOpts{ Name: "verifiable_chunk_received_total", - Namespace: namespaceVerification, - Subsystem: subsystemVerifierEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemVerifierEngine, Help: "total number verifiable chunks received by verifier engine from fetcher engine", }) sentResultApprovalTotalVerifier := prometheus.NewCounter(prometheus.CounterOpts{ Name: "result_approvals_total", - Namespace: namespaceVerification, - Subsystem: subsystemVerifierEngine, + Namespace: internal.NamespaceVerification, + Subsystem: internal.SubsystemVerifierEngine, Help: "total number of emitted result approvals by verifier engine", }) diff --git a/module/state_synchronization/execution_data_requester.go b/module/state_synchronization/execution_data_requester.go index dd479455698..2a022a160f6 100644 --- a/module/state_synchronization/execution_data_requester.go +++ b/module/state_synchronization/execution_data_requester.go @@ -3,11 +3,11 @@ package state_synchronization import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" + model2 "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" ) // OnExecutionDataReceivedConsumer is a callback that is called ExecutionData is received for a new block -type OnExecutionDataReceivedConsumer func(*execution_data.BlockExecutionDataEntity) +type OnExecutionDataReceivedConsumer func(*model2.BlockExecutionDataEntity) // ExecutionDataRequester is a component that syncs ExecutionData from the network, and exposes // a callback that is called when a new ExecutionData is received diff --git a/module/state_synchronization/requester/distributer.go b/module/state_synchronization/requester/distributer.go index ded5ebb95a2..2e0b95535e8 100644 --- a/module/state_synchronization/requester/distributer.go +++ b/module/state_synchronization/requester/distributer.go @@ -3,7 +3,7 @@ package requester import ( "sync" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/state_synchronization" ) @@ -27,7 +27,7 @@ func (p *ExecutionDataDistributor) AddOnExecutionDataReceivedConsumer(consumer s } // OnExecutionDataReceived is called when new execution data is received -func (p *ExecutionDataDistributor) OnExecutionDataReceived(executionData *execution_data.BlockExecutionDataEntity) { +func (p *ExecutionDataDistributor) OnExecutionDataReceived(executionData *model.BlockExecutionDataEntity) { p.lock.Lock() defer p.lock.Unlock() diff --git a/module/state_synchronization/requester/execution_data_requester.go b/module/state_synchronization/requester/execution_data_requester.go index 6cc1a828e91..a622b70b44a 100644 --- a/module/state_synchronization/requester/execution_data_requester.go +++ b/module/state_synchronization/requester/execution_data_requester.go @@ -17,6 +17,7 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + model2 
"github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/jobqueue" "github.com/onflow/flow-go/module/state_synchronization" @@ -440,7 +441,7 @@ func (e *executionDataRequester) processNotificationJob(ctx irrecoverable.Signal e.metrics.NotificationSent(entry.Height) } -func (e *executionDataRequester) notifyConsumers(executionData *execution_data.BlockExecutionDataEntity) { +func (e *executionDataRequester) notifyConsumers(executionData *model2.BlockExecutionDataEntity) { e.consumerMu.RLock() defer e.consumerMu.RUnlock() diff --git a/module/state_synchronization/requester/execution_data_requester_test.go b/module/state_synchronization/requester/execution_data_requester_test.go index 5ac29329094..37fe1dac1a3 100644 --- a/module/state_synchronization/requester/execution_data_requester_test.go +++ b/module/state_synchronization/requester/execution_data_requester_test.go @@ -25,6 +25,7 @@ import ( "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" exedatamock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" + model2 "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" @@ -75,7 +76,7 @@ type testExecutionDataServiceEntry struct { // When set (and fn is unset), this error will be returned for any calls to Get for this ED Err error // Otherwise, the execution data will be returned directly with no error - ExecutionData *execution_data.BlockExecutionData + ExecutionData *model2.BlockExecutionData } type specialBlockGenerator func(int) map[uint64]testExecutionDataCallback @@ -85,12 +86,12 @@ type edTestRun struct { specialBlocks specialBlockGenerator } -type testExecutionDataCallback func(*execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) +type testExecutionDataCallback func(*model2.BlockExecutionData) (*model2.BlockExecutionData, error) func mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) *exedatamock.Downloader { downloader := new(exedatamock.Downloader) - get := func(id flow.Identifier) (*execution_data.BlockExecutionData, error) { + get := func(id flow.Identifier) (*model2.BlockExecutionData, error) { ed, has := edStore[id] // return not found @@ -114,7 +115,7 @@ func mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) downloader.On("Get", mock.Anything, mock.AnythingOfType("flow.Identifier")). 
Return( - func(ctx context.Context, id flow.Identifier) *execution_data.BlockExecutionData { + func(ctx context.Context, id flow.Identifier) *model2.BlockExecutionData { ed, _ := get(id) return ed }, @@ -325,7 +326,7 @@ func generateBlocksWithSomeMissed(blockCount int) map[uint64]testExecutionDataCa failures := rand.Intn(3) + 1 attempts := 0 - missing[i] = func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) { + missing[i] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { if attempts < failures*2 { // this func is run twice for every attempt by the mock (once for ExecutionData one for errors) attempts++ // This should fail the first n fetch attempts @@ -348,7 +349,7 @@ func generateBlocksWithRandomDelays(blockCount int) map[uint64]testExecutionData continue } - delays[i] = func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) { + delays[i] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { time.Sleep(time.Duration(rand.Intn(25)) * time.Millisecond) return ed, nil } @@ -364,7 +365,7 @@ func generateBlocksWithHaltingError(blockCount int) (specialBlockGenerator, erro generate := func(int) map[uint64]testExecutionDataCallback { return map[uint64]testExecutionDataCallback{ - height: func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) { + height: func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { return nil, err }, } @@ -376,7 +377,7 @@ func generatePauseResume(pauseHeight uint64) (specialBlockGenerator, func()) { pause := make(chan struct{}) blocks := map[uint64]testExecutionDataCallback{} - blocks[pauseHeight] = func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) { + blocks[pauseHeight] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { <-pause return ed, nil } @@ -530,8 +531,8 @@ func (suite *ExecutionDataRequesterSuite) runRequesterTest(edr state_synchroniza return fetchedExecutionData } -func (suite *ExecutionDataRequesterSuite) consumeExecutionDataNotifications(cfg *fetchTestRun, done func(), fetchedExecutionData map[flow.Identifier]*execution_data.BlockExecutionData) func(ed *execution_data.BlockExecutionDataEntity) { - return func(ed *execution_data.BlockExecutionDataEntity) { +func (suite *ExecutionDataRequesterSuite) consumeExecutionDataNotifications(cfg *fetchTestRun, done func(), fetchedExecutionData map[flow.Identifier]*model2.BlockExecutionData) func(ed *model2.BlockExecutionDataEntity) { + return func(ed *model2.BlockExecutionDataEntity) { if _, has := fetchedExecutionData[ed.BlockID]; has { suite.T().Errorf("duplicate execution data for block %s", ed.BlockID) return @@ -568,7 +569,7 @@ func (suite *ExecutionDataRequesterSuite) finalizeBlocks(cfg *fetchTestRun, foll } } -type receivedExecutionData map[flow.Identifier]*execution_data.BlockExecutionData +type receivedExecutionData map[flow.Identifier]*model2.BlockExecutionData type fetchTestRun struct { sealedCount int startHeight uint64 @@ -578,14 +579,14 @@ type fetchTestRun struct { resultsByID map[flow.Identifier]*flow.ExecutionResult resultsByBlockID map[flow.Identifier]*flow.ExecutionResult sealsByBlockID map[flow.Identifier]*flow.Seal - executionDataByID map[flow.Identifier]*execution_data.BlockExecutionData + executionDataByID map[flow.Identifier]*model2.BlockExecutionData executionDataEntries map[flow.Identifier]*testExecutionDataServiceEntry executionDataIDByBlockID 
map[flow.Identifier]flow.Identifier expectedIrrecoverable error stopHeight uint64 resumeHeight uint64 - fetchedExecutionData map[flow.Identifier]*execution_data.BlockExecutionData + fetchedExecutionData map[flow.Identifier]*model2.BlockExecutionData waitTimeout time.Duration maxSearchAhead uint64 @@ -629,7 +630,7 @@ func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, speci resultsByID := map[flow.Identifier]*flow.ExecutionResult{} resultsByBlockID := map[flow.Identifier]*flow.ExecutionResult{} sealsByBlockID := map[flow.Identifier]*flow.Seal{} - executionDataByID := map[flow.Identifier]*execution_data.BlockExecutionData{} + executionDataByID := map[flow.Identifier]*model2.BlockExecutionData{} executionDataIDByBlockID := map[flow.Identifier]flow.Identifier{} sealedCount := blockCount - 4 // seals for blocks 1-96 diff --git a/module/state_synchronization/requester/jobs/execution_data_reader.go b/module/state_synchronization/requester/jobs/execution_data_reader.go index bd5f7adbeae..44bdcd6ca4a 100644 --- a/module/state_synchronization/requester/jobs/execution_data_reader.go +++ b/module/state_synchronization/requester/jobs/execution_data_reader.go @@ -7,8 +7,8 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/storage" ) @@ -17,7 +17,7 @@ import ( type BlockEntry struct { BlockID flow.Identifier Height uint64 - ExecutionData *execution_data.BlockExecutionDataEntity + ExecutionData *model.BlockExecutionDataEntity } var _ module.Jobs = (*ExecutionDataReader)(nil) diff --git a/module/state_synchronization/requester/jobs/execution_data_reader_test.go b/module/state_synchronization/requester/jobs/execution_data_reader_test.go index 90240c83dd8..df8b7162bcf 100644 --- a/module/state_synchronization/requester/jobs/execution_data_reader_test.go +++ b/module/state_synchronization/requester/jobs/execution_data_reader_test.go @@ -13,9 +13,9 @@ import ( "github.com/onflow/flow-go/engine/access/state_stream" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" exedatamock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" @@ -36,7 +36,7 @@ type ExecutionDataReaderSuite struct { fetchTimeout time.Duration executionDataID flow.Identifier - executionData *execution_data.BlockExecutionData + executionData *model.BlockExecutionData block *flow.Block blocksByHeight map[uint64]*flow.Block @@ -106,9 +106,9 @@ func (suite *ExecutionDataReaderSuite) reset() { } func (suite *ExecutionDataReaderSuite) TestAtIndex() { - setExecutionDataGet := func(executionData *execution_data.BlockExecutionData, err error) { + setExecutionDataGet := func(executionData *model.BlockExecutionData, err error) { suite.downloader.On("Get", mock.Anything, suite.executionDataID).Return( - func(ctx context.Context, id flow.Identifier) *execution_data.BlockExecutionData { + func(ctx context.Context, id flow.Identifier) 
*model.BlockExecutionData { return executionData }, func(ctx context.Context, id flow.Identifier) error { @@ -139,7 +139,7 @@ func (suite *ExecutionDataReaderSuite) TestAtIndex() { ed := unittest.BlockExecutionDataFixture() setExecutionDataGet(ed, nil) - edEntity := execution_data.NewBlockExecutionDataEntity(suite.executionDataID, ed) + edEntity := model.NewBlockExecutionDataEntity(suite.executionDataID, ed) job, err := suite.reader.AtIndex(suite.block.Header.Height) require.NoError(suite.T(), err) diff --git a/network/alsp/manager/manager.go b/network/alsp/manager/manager.go index f29fbc694b4..71bda556749 100644 --- a/network/alsp/manager/manager.go +++ b/network/alsp/manager/manager.go @@ -16,6 +16,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/alsp" "github.com/onflow/flow-go/network/alsp/internal" @@ -177,7 +178,7 @@ func NewMisbehaviorReportManager(cfg *MisbehaviorReportManagerConfig, consumer n store := queue.NewHeroStore( cfg.SpamReportQueueSize, lg.With().Str("component", "spam_record_queue").Logger(), - metrics.ApplicationLayerSpamRecordQueueMetricsFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkType)) + networkmetrics.ApplicationLayerSpamRecordQueueMetricsFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkType)) m.workerPool = worker.NewWorkerPoolBuilder[internal.ReportedMisbehaviorWork]( cfg.Logger, @@ -191,7 +192,7 @@ func NewMisbehaviorReportManager(cfg *MisbehaviorReportManagerConfig, consumer n m.cache = m.cacheFactory( lg, cfg.SpamRecordCacheSize, - metrics.ApplicationLayerSpamRecordCacheMetricFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkType)) + networkmetrics.ApplicationLayerSpamRecordCacheMetricFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkType)) builder := component.NewComponentManagerBuilder() builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { diff --git a/network/errors.go b/network/errors.go index 5c4485324e2..fd54da920f9 100644 --- a/network/errors.go +++ b/network/errors.go @@ -6,7 +6,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) var ( diff --git a/network/internal/p2putils/utils.go b/network/internal/p2putils/utils.go index 2325df8734a..d5787e5b289 100644 --- a/network/internal/p2putils/utils.go +++ b/network/internal/p2putils/utils.go @@ -14,8 +14,8 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network/p2p/keyutils" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/unicast/protocols" + "github.com/onflow/flow-go/utils/p2plogging" ) // FlowStream returns the Flow protocol Stream in the connection if one exist, else it returns nil diff --git a/network/p2p/blob/blob_service.go b/network/p2p/blob/blob_service.go index 7f8d06e56c1..879afaec92c 100644 --- a/network/p2p/blob/blob_service.go +++ b/network/p2p/blob/blob_service.go @@ -30,8 +30,8 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ipld "github.com/ipfs/go-ipld-format" ) diff --git a/network/p2p/cache/gossipsub_spam_records.go 
b/network/p2p/cache/gossipsub_spam_records.go index 265c2befbb7..d14b13aed46 100644 --- a/network/p2p/cache/gossipsub_spam_records.go +++ b/network/p2p/cache/gossipsub_spam_records.go @@ -13,7 +13,7 @@ import ( "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) // GossipSubSpamRecordCache is a cache for storing the gossipsub spam records of peers. It is thread-safe. diff --git a/network/p2p/connection/connection_gater.go b/network/p2p/connection/connection_gater.go index 4bcfb16c9e0..9f8eb9015c4 100644 --- a/network/p2p/connection/connection_gater.go +++ b/network/p2p/connection/connection_gater.go @@ -12,8 +12,8 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ) var _ p2p.ConnectionGater = (*ConnGater)(nil) diff --git a/network/p2p/connection/connection_gater_test.go b/network/p2p/connection/connection_gater_test.go index 5a2c678b15c..2c5b4c04e0b 100644 --- a/network/p2p/connection/connection_gater_test.go +++ b/network/p2p/connection/connection_gater_test.go @@ -21,9 +21,9 @@ import ( "github.com/onflow/flow-go/network/p2p/connection" mockp2p "github.com/onflow/flow-go/network/p2p/mock" p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" - "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/unicast/stream" + "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/p2p/connection/connector.go b/network/p2p/connection/connector.go index 69fbb5d4359..7ff75d91103 100644 --- a/network/p2p/connection/connector.go +++ b/network/p2p/connection/connector.go @@ -8,8 +8,8 @@ import ( "github.com/onflow/flow-go/network/internal/p2putils" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/rand" ) diff --git a/network/p2p/connection/internal/loggerNotifiee.go b/network/p2p/connection/internal/loggerNotifiee.go index ce49c6081a8..1558a1f4c9b 100644 --- a/network/p2p/connection/internal/loggerNotifiee.go +++ b/network/p2p/connection/internal/loggerNotifiee.go @@ -6,7 +6,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) type LoggerNotifiee struct { diff --git a/network/p2p/connection/peerManager.go b/network/p2p/connection/peerManager.go index d8e323813fd..8105bee50de 100644 --- a/network/p2p/connection/peerManager.go +++ b/network/p2p/connection/peerManager.go @@ -12,8 +12,8 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/rand" ) diff --git a/network/p2p/dht/dht.go b/network/p2p/dht/dht.go index 930df0e2251..1b522f7a7af 100644 --- a/network/p2p/dht/dht.go +++ b/network/p2p/dht/dht.go @@ -10,7 +10,7 @@ import ( 
"github.com/rs/zerolog" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) // This produces a new IPFS DHT diff --git a/network/p2p/inspector/validation/control_message_validation_inspector.go b/network/p2p/inspector/validation/control_message_validation_inspector.go index 32f475de8d1..534b7d70e1e 100644 --- a/network/p2p/inspector/validation/control_message_validation_inspector.go +++ b/network/p2p/inspector/validation/control_message_validation_inspector.go @@ -23,10 +23,10 @@ import ( "github.com/onflow/flow-go/network/p2p/inspector/internal/ratelimit" p2pmsg "github.com/onflow/flow-go/network/p2p/message" "github.com/onflow/flow-go/network/p2p/p2pconf" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" flowrand "github.com/onflow/flow-go/utils/rand" ) diff --git a/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go b/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go index a1d55ab873c..da12aae4227 100644 --- a/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go +++ b/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go @@ -15,6 +15,7 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/distributor" @@ -246,16 +247,16 @@ func defaultInspectorSuite(rpcTracker p2p.RPCControlTracking) p2p.GossipSubRpcIn inspectorCfg.GossipSubRPCMetricsInspectorConfigs.NumberOfWorkers, []queue.HeroStoreConfigOption{ queue.WithHeroStoreSizeLimit(inspectorCfg.GossipSubRPCMetricsInspectorConfigs.CacheSize), - queue.WithHeroStoreCollector(metrics.GossipSubRPCMetricsObserverInspectorQueueMetricFactory(heroCacheMetricsFactory, networkType)), + queue.WithHeroStoreCollector(networkmetrics.GossipSubRPCMetricsObserverInspectorQueueMetricFactory(heroCacheMetricsFactory, networkType)), }...) notificationDistributor := distributor.DefaultGossipSubInspectorNotificationDistributor( logger, []queue.HeroStoreConfigOption{ queue.WithHeroStoreSizeLimit(inspectorCfg.GossipSubRPCInspectorNotificationCacheSize), - queue.WithHeroStoreCollector(metrics.RpcInspectorNotificationQueueMetricFactory(heroCacheMetricsFactory, networkType))}...) + queue.WithHeroStoreCollector(networkmetrics.RpcInspectorNotificationQueueMetricFactory(heroCacheMetricsFactory, networkType))}...) 
- inspectMsgQueueCacheCollector := metrics.GossipSubRPCInspectorQueueMetricFactory(heroCacheMetricsFactory, networkType) - clusterPrefixedCacheCollector := metrics.GossipSubRPCInspectorClusterPrefixedCacheMetricFactory(heroCacheMetricsFactory, networkType) + inspectMsgQueueCacheCollector := networkmetrics.GossipSubRPCInspectorQueueMetricFactory(heroCacheMetricsFactory, networkType) + clusterPrefixedCacheCollector := networkmetrics.GossipSubRPCInspectorClusterPrefixedCacheMetricFactory(heroCacheMetricsFactory, networkType) rpcValidationInspector, err := validation.NewControlMsgValidationInspector( logger, sporkId, diff --git a/network/p2p/p2pbuilder/utils.go b/network/p2p/p2pbuilder/utils.go index ef2a2bc1ae9..becc7a024ae 100644 --- a/network/p2p/p2pbuilder/utils.go +++ b/network/p2p/p2pbuilder/utils.go @@ -10,7 +10,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) const keyResourceManagerLimit = "libp2p_resource_manager_limit" diff --git a/network/p2p/p2pnet/network.go b/network/p2p/p2pnet/network.go index 3b280ecaeae..5daa297fa4b 100644 --- a/network/p2p/p2pnet/network.go +++ b/network/p2p/p2pnet/network.go @@ -31,7 +31,6 @@ import ( "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/blob" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet/internal" "github.com/onflow/flow-go/network/p2p/ping" "github.com/onflow/flow-go/network/p2p/subscription" @@ -43,6 +42,7 @@ import ( flowpubsub "github.com/onflow/flow-go/network/validator/pubsub" _ "github.com/onflow/flow-go/utils/binstat" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ) const ( diff --git a/network/p2p/p2pnode/gossipsubMetrics.go b/network/p2p/p2pnode/gossipsubMetrics.go index 4a06b7e6e7a..b36c6ec4fc4 100644 --- a/network/p2p/p2pnode/gossipsubMetrics.go +++ b/network/p2p/p2pnode/gossipsubMetrics.go @@ -7,7 +7,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) // GossipSubControlMessageMetrics is a metrics and observability wrapper component for the incoming RPCs to a diff --git a/network/p2p/p2pnode/libp2pNode.go b/network/p2p/p2pnode/libp2pNode.go index 074d76b45a6..baafbc12f80 100644 --- a/network/p2p/p2pnode/libp2pNode.go +++ b/network/p2p/p2pnode/libp2pNode.go @@ -25,10 +25,10 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/p2putils" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnode/internal" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ) const ( diff --git a/network/p2p/p2pnode/libp2pNode_test.go b/network/p2p/p2pnode/libp2pNode_test.go index ad42ec17108..9dcc350f044 100644 --- a/network/p2p/p2pnode/libp2pNode_test.go +++ b/network/p2p/p2pnode/libp2pNode_test.go @@ -25,12 +25,12 @@ import ( "github.com/onflow/flow-go/network/internal/p2pfixtures" "github.com/onflow/flow-go/network/internal/p2putils" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnode" p2ptest 
"github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/utils" validator "github.com/onflow/flow-go/network/validator/pubsub" + "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/p2p/p2pnode/protocolPeerCache.go b/network/p2p/p2pnode/protocolPeerCache.go index 125d9aa3b37..a8861cd7508 100644 --- a/network/p2p/p2pnode/protocolPeerCache.go +++ b/network/p2p/p2pnode/protocolPeerCache.go @@ -11,7 +11,7 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/rs/zerolog" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) // ProtocolPeerCache store a mapping from protocol ID to peers who support that protocol diff --git a/network/p2p/scoring/registry.go b/network/p2p/scoring/registry.go index a1174e165b9..7705ed45c86 100644 --- a/network/p2p/scoring/registry.go +++ b/network/p2p/scoring/registry.go @@ -12,8 +12,8 @@ import ( "github.com/onflow/flow-go/network/p2p" netcache "github.com/onflow/flow-go/network/p2p/cache" p2pmsg "github.com/onflow/flow-go/network/p2p/message" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ) const ( diff --git a/network/p2p/test/topic_validator_test.go b/network/p2p/test/topic_validator_test.go index 21fd328cf1a..3ea250cba6c 100644 --- a/network/p2p/test/topic_validator_test.go +++ b/network/p2p/test/topic_validator_test.go @@ -24,13 +24,13 @@ import ( "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/translator" "github.com/onflow/flow-go/network/p2p/utils" "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/validator" flowpubsub "github.com/onflow/flow-go/network/validator/pubsub" + "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/p2p/tracer/gossipSubMeshTracer.go b/network/p2p/tracer/gossipSubMeshTracer.go index cbd3d18d409..bd3420ada5f 100644 --- a/network/p2p/tracer/gossipSubMeshTracer.go +++ b/network/p2p/tracer/gossipSubMeshTracer.go @@ -14,11 +14,12 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/tracer/internal" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ) const ( @@ -80,8 +81,8 @@ func NewGossipSubMeshTracer(config *GossipSubMeshTracerConfig) *GossipSubMeshTra rpcSentTracker := internal.NewRPCSentTracker(&internal.RPCSentTrackerConfig{ Logger: lg, RPCSentCacheSize: config.RpcSentTrackerCacheSize, - RPCSentCacheCollector: metrics.GossipSubRPCSentTrackerMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType), - WorkerQueueCacheCollector: metrics.GossipSubRPCSentTrackerQueueMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType), + RPCSentCacheCollector: networkmetrics.GossipSubRPCSentTrackerMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType), + 
WorkerQueueCacheCollector: networkmetrics.GossipSubRPCSentTrackerQueueMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType), WorkerQueueCacheSize: config.RpcSentTrackerWorkerQueueCacheSize, NumOfWorkers: config.RpcSentTrackerNumOfWorkers, LastHighestIhavesSentResetInterval: defaultLastHighestIHaveRPCSizeResetInterval, diff --git a/network/p2p/tracer/gossipSubScoreTracer.go b/network/p2p/tracer/gossipSubScoreTracer.go index b28189ec624..1c1568f1f24 100644 --- a/network/p2p/tracer/gossipSubScoreTracer.go +++ b/network/p2p/tracer/gossipSubScoreTracer.go @@ -12,8 +12,8 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ) const ( diff --git a/network/p2p/translator/identity_provider_translator.go b/network/p2p/translator/identity_provider_translator.go index 8156f2e22a2..5e109e23145 100644 --- a/network/p2p/translator/identity_provider_translator.go +++ b/network/p2p/translator/identity_provider_translator.go @@ -10,7 +10,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/keyutils" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) // IdentityProviderIDTranslator implements an `p2p.IDTranslator` which provides ID diff --git a/network/p2p/unicast/errors.go b/network/p2p/unicast/errors.go index d8abb2624f7..817560e1b8f 100644 --- a/network/p2p/unicast/errors.go +++ b/network/p2p/unicast/errors.go @@ -6,7 +6,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) // ErrDialInProgress indicates that the libp2p node is currently dialing the peer. diff --git a/network/p2p/unicast/manager.go b/network/p2p/unicast/manager.go index a03f5f1de2b..895262fb59a 100644 --- a/network/p2p/unicast/manager.go +++ b/network/p2p/unicast/manager.go @@ -17,9 +17,9 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/unicast/stream" + "github.com/onflow/flow-go/utils/p2plogging" ) const ( diff --git a/network/p2p/unicast/stream/errors.go b/network/p2p/unicast/stream/errors.go index 9c73294c52b..f7f2f187c3b 100644 --- a/network/p2p/unicast/stream/errors.go +++ b/network/p2p/unicast/stream/errors.go @@ -7,7 +7,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) // ErrSecurityProtocolNegotiationFailed indicates security protocol negotiation failed during the stream factory connect attempt. 
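The hunks above are a mechanical move of the p2plogging helper package from network/p2p/p2plogging to utils/p2plogging; call sites keep the same code apart from the rewritten import path. A minimal caller-side sketch, assuming the package's PeerId helper (its signature is not shown in these hunks):

package example

import (
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/utils/p2plogging" // previously network/p2p/p2plogging
)

// logPeer illustrates the call pattern that stays identical across this
// refactor: only the import path above changes. PeerId is assumed to return
// a cached base58 string representation of the peer ID.
func logPeer(lg zerolog.Logger, pid peer.ID) {
	lg.Info().Str("peer_id", p2plogging.PeerId(pid)).Msg("stream established")
}

Hosting the helper under utils/ removes a dependency from generic utilities onto the network/p2p tree, consistent with the direction of the other import rewrites in this patch.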
diff --git a/network/test/unicast_authorization_test.go b/network/test/unicast_authorization_test.go index b9cec9ba8b0..4d25481b3d8 100644 --- a/network/test/unicast_authorization_test.go +++ b/network/test/unicast_authorization_test.go @@ -23,9 +23,9 @@ import ( "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet" "github.com/onflow/flow-go/network/validator" + "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/validator/authorized_sender_validator.go b/network/validator/authorized_sender_validator.go index 69d925661a1..6fb2f195b34 100644 --- a/network/validator/authorized_sender_validator.go +++ b/network/validator/authorized_sender_validator.go @@ -13,7 +13,7 @@ import ( "github.com/onflow/flow-go/network/codec" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" + "github.com/onflow/flow-go/utils/p2plogging" ) var ( diff --git a/network/validator/pubsub/topic_validator.go b/network/validator/pubsub/topic_validator.go index 078f9272b12..d92b84e327f 100644 --- a/network/validator/pubsub/topic_validator.go +++ b/network/validator/pubsub/topic_validator.go @@ -12,10 +12,10 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/validator" _ "github.com/onflow/flow-go/utils/binstat" "github.com/onflow/flow-go/utils/logging" + "github.com/onflow/flow-go/utils/p2plogging" ) // messagePubKey extracts the public key of the envelope signer from a libp2p message. 
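The `topic_validator.go` hunk above ends at `messagePubKey`, which recovers the envelope signer's key from a libp2p message. A hedged sketch of that recovery using only public go-libp2p APIs; the concrete flow-go implementation is not shown in this diff:

```go
package example

import (
	"fmt"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/peer"
)

// signerPubKey parses the sender peer ID out of a pubsub message and derives
// the signer's public key from it. For inlined key types such as ed25519,
// ExtractPublicKey succeeds without any network or peerstore lookup.
func signerPubKey(msg *pubsub.Message) (peer.ID, crypto.PubKey, error) {
	pid, err := peer.IDFromBytes(msg.From)
	if err != nil {
		return "", nil, fmt.Errorf("invalid sender peer ID: %w", err)
	}
	pk, err := pid.ExtractPublicKey()
	if err != nil {
		return "", nil, fmt.Errorf("could not extract public key of peer %s: %w", pid, err)
	}
	return pid, pk, nil
}
```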
diff --git a/network/p2p/p2plogging/internal/peerIdCache.go b/utils/p2plogging/internal/peerIdCache.go similarity index 90% rename from network/p2p/p2plogging/internal/peerIdCache.go rename to utils/p2plogging/internal/peerIdCache.go index f5655a73756..527d2a71b7b 100644 --- a/network/p2p/p2plogging/internal/peerIdCache.go +++ b/utils/p2plogging/internal/peerIdCache.go @@ -2,13 +2,11 @@ package internal import ( "github.com/libp2p/go-libp2p/core/peer" - "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" "github.com/onflow/flow-go/module/mempool/stdmap" - "github.com/onflow/flow-go/module/metrics" ) var _ flow.Entity = (*peerIdCacheEntry)(nil) @@ -21,12 +19,10 @@ func NewPeerIdCache(size uint32) *PeerIdCache { return &PeerIdCache{ peerCache: stdmap.NewBackend( stdmap.WithBackData( - herocache.NewCache( + herocache.NewCacheWithNoopLoggerAndMetrics( size, herocache.DefaultOversizeFactor, - heropool.LRUEjection, - zerolog.Nop(), - metrics.NewNoopCollector()))), + heropool.LRUEjection))), } } diff --git a/network/p2p/p2plogging/internal/peerIdCache_test.go b/utils/p2plogging/internal/peerIdCache_test.go similarity index 90% rename from network/p2p/p2plogging/internal/peerIdCache_test.go rename to utils/p2plogging/internal/peerIdCache_test.go index 13279276a95..d85246cd6be 100644 --- a/network/p2p/p2plogging/internal/peerIdCache_test.go +++ b/utils/p2plogging/internal/peerIdCache_test.go @@ -5,9 +5,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/onflow/flow-go/network/internal/p2pfixtures" - "github.com/onflow/flow-go/network/p2p/p2plogging/internal" p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/utils/p2plogging/internal" ) func TestNewPeerIdCache(t *testing.T) { @@ -56,9 +55,9 @@ func TestPeerIdCache_EjectionScenarios(t *testing.T) { assert.Equal(t, uint(0), cache.Size()) // add peer IDs to fill the cache - pid1 := p2pfixtures.PeerIdFixture(t) - pid2 := p2pfixtures.PeerIdFixture(t) - pid3 := p2pfixtures.PeerIdFixture(t) + pid1 := p2ptest.PeerIdFixture(t) + pid2 := p2ptest.PeerIdFixture(t) + pid3 := p2ptest.PeerIdFixture(t) cache.PeerIdString(pid1) assert.Equal(t, uint(1), cache.Size()) @@ -74,7 +73,7 @@ func TestPeerIdCache_EjectionScenarios(t *testing.T) { assert.Equal(t, uint(3), cache.Size()) // add a new peer ID - pid4 := p2pfixtures.PeerIdFixture(t) + pid4 := p2ptest.PeerIdFixture(t) cache.PeerIdString(pid4) assert.Equal(t, uint(3), cache.Size()) diff --git a/network/p2p/p2plogging/logging.go b/utils/p2plogging/logging.go similarity index 92% rename from network/p2p/p2plogging/logging.go rename to utils/p2plogging/logging.go index e4f2e93ad7d..165cb538e28 100644 --- a/network/p2p/p2plogging/logging.go +++ b/utils/p2plogging/logging.go @@ -3,7 +3,7 @@ package p2plogging import ( "github.com/libp2p/go-libp2p/core/peer" - "github.com/onflow/flow-go/network/p2p/p2plogging/internal" + "github.com/onflow/flow-go/utils/p2plogging/internal" ) // peerIdCache is a global cache of peer ids, it is used to avoid expensive base58 encoding of peer ids. 
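In the `peerIdCache.go` rename above, the explicit `zerolog.Nop()` and `metrics.NewNoopCollector()` arguments disappear into a new `herocache.NewCacheWithNoopLoggerAndMetrics` constructor. A plausible shape for that constructor, inferred purely from the two call sites in this hunk; the real one may build its no-op collector locally to avoid the metrics import this series is untangling:

```go
package herocache

import (
	"github.com/rs/zerolog"

	"github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
	"github.com/onflow/flow-go/module/metrics"
)

// NewCacheWithNoopLoggerAndMetrics forwards to NewCache with a no-op logger
// and a no-op metrics collector, so callers that never inspect cache
// internals (like the peer ID logging cache) need not wire those in.
func NewCacheWithNoopLoggerAndMetrics(
	sizeLimit uint32,
	oversizeFactor uint32,
	ejectionMode heropool.EjectionMode,
) *Cache {
	return NewCache(sizeLimit, oversizeFactor, ejectionMode, zerolog.Nop(), metrics.NewNoopCollector())
}
```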
diff --git a/network/p2p/p2plogging/logging_test.go b/utils/p2plogging/logging_test.go similarity index 96% rename from network/p2p/p2plogging/logging_test.go rename to utils/p2plogging/logging_test.go index 3717f226631..4c57af84a0f 100644 --- a/network/p2p/p2plogging/logging_test.go +++ b/utils/p2plogging/logging_test.go @@ -6,8 +6,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" - "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index 331f2ecb674..ab6a2770dcf 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -16,6 +16,7 @@ import ( "github.com/onflow/cadence" sdk "github.com/onflow/flow-go-sdk" + "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/network/message" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -2432,22 +2433,22 @@ func GetFlowProtocolEventID( return flow.HashToID(eventIDHash) } -func WithBlockExecutionDataBlockID(blockID flow.Identifier) func(*execution_data.BlockExecutionData) { - return func(bed *execution_data.BlockExecutionData) { +func WithBlockExecutionDataBlockID(blockID flow.Identifier) func(*model.BlockExecutionData) { + return func(bed *model.BlockExecutionData) { bed.BlockID = blockID } } -func WithChunkExecutionDatas(chunks ...*execution_data.ChunkExecutionData) func(*execution_data.BlockExecutionData) { - return func(bed *execution_data.BlockExecutionData) { +func WithChunkExecutionDatas(chunks ...*model.ChunkExecutionData) func(*model.BlockExecutionData) { + return func(bed *model.BlockExecutionData) { bed.ChunkExecutionDatas = chunks } } -func BlockExecutionDataFixture(opts ...func(*execution_data.BlockExecutionData)) *execution_data.BlockExecutionData { - bed := &execution_data.BlockExecutionData{ +func BlockExecutionDataFixture(opts ...func(*model.BlockExecutionData)) *model.BlockExecutionData { + bed := &model.BlockExecutionData{ BlockID: IdentifierFixture(), - ChunkExecutionDatas: []*execution_data.ChunkExecutionData{}, + ChunkExecutionDatas: []*model.ChunkExecutionData{}, } for _, opt := range opts { @@ -2457,13 +2458,13 @@ func BlockExecutionDataFixture(opts ...func(*execution_data.BlockExecutionData)) return bed } -func BlockExecutionDatEntityFixture(opts ...func(*execution_data.BlockExecutionData)) *execution_data.BlockExecutionDataEntity { +func BlockExecutionDatEntityFixture(opts ...func(*model.BlockExecutionData)) *model.BlockExecutionDataEntity { execData := BlockExecutionDataFixture(opts...) 
- return execution_data.NewBlockExecutionDataEntity(IdentifierFixture(), execData) + return model.NewBlockExecutionDataEntity(IdentifierFixture(), execData) } -func BlockExecutionDatEntityListFixture(n int) []*execution_data.BlockExecutionDataEntity { - l := make([]*execution_data.BlockExecutionDataEntity, n) +func BlockExecutionDatEntityListFixture(n int) []*model.BlockExecutionDataEntity { + l := make([]*model.BlockExecutionDataEntity, n) for i := 0; i < n; i++ { l[i] = BlockExecutionDatEntityFixture() } @@ -2471,21 +2472,21 @@ func BlockExecutionDatEntityListFixture(n int) []*execution_data.BlockExecutionD return l } -func WithChunkEvents(events flow.EventsList) func(*execution_data.ChunkExecutionData) { - return func(conf *execution_data.ChunkExecutionData) { +func WithChunkEvents(events flow.EventsList) func(*model.ChunkExecutionData) { + return func(conf *model.ChunkExecutionData) { conf.Events = events } } -func WithTrieUpdate(trieUpdate *ledger.TrieUpdate) func(*execution_data.ChunkExecutionData) { - return func(conf *execution_data.ChunkExecutionData) { +func WithTrieUpdate(trieUpdate *ledger.TrieUpdate) func(*model.ChunkExecutionData) { + return func(conf *model.ChunkExecutionData) { conf.TrieUpdate = trieUpdate } } -func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*execution_data.ChunkExecutionData)) *execution_data.ChunkExecutionData { +func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*model.ChunkExecutionData)) *model.ChunkExecutionData { collection := CollectionFixture(5) - ced := &execution_data.ChunkExecutionData{ + ced := &model.ChunkExecutionData{ Collection: &collection, Events: flow.EventsList{}, TrieUpdate: testutils.TrieUpdateFixture(2, 1, 8), From 11a741aebd125cc4c7c8bdaa466cab9c5a3259d9 Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 15:16:46 -0700 Subject: [PATCH 18/25] wip fixing import cycles --- .../execution_data_requester.go | 4 +-- .../requester/execution_data_requester.go | 4 +-- .../execution_data_requester_test.go | 30 +++++++++---------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/module/state_synchronization/execution_data_requester.go b/module/state_synchronization/execution_data_requester.go index 2a022a160f6..306d6e5b8d4 100644 --- a/module/state_synchronization/execution_data_requester.go +++ b/module/state_synchronization/execution_data_requester.go @@ -3,11 +3,11 @@ package state_synchronization import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/module/component" - model2 "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" + executiondatamodel "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" ) // OnExecutionDataReceivedConsumer is a callback that is called ExecutionData is received for a new block -type OnExecutionDataReceivedConsumer func(*model2.BlockExecutionDataEntity) +type OnExecutionDataReceivedConsumer func(*executiondatamodel.BlockExecutionDataEntity) // ExecutionDataRequester is a component that syncs ExecutionData from the network, and exposes // a callback that is called when a new ExecutionData is received diff --git a/module/state_synchronization/requester/execution_data_requester.go b/module/state_synchronization/requester/execution_data_requester.go index a622b70b44a..d1e55997d5c 100644 --- a/module/state_synchronization/requester/execution_data_requester.go +++ b/module/state_synchronization/requester/execution_data_requester.go @@ -17,7 +17,7 @@ import ( 
"github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" - model2 "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" + executiondatamodel "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/jobqueue" "github.com/onflow/flow-go/module/state_synchronization" @@ -441,7 +441,7 @@ func (e *executionDataRequester) processNotificationJob(ctx irrecoverable.Signal e.metrics.NotificationSent(entry.Height) } -func (e *executionDataRequester) notifyConsumers(executionData *model2.BlockExecutionDataEntity) { +func (e *executionDataRequester) notifyConsumers(executionData *executiondatamodel.BlockExecutionDataEntity) { e.consumerMu.RLock() defer e.consumerMu.RUnlock() diff --git a/module/state_synchronization/requester/execution_data_requester_test.go b/module/state_synchronization/requester/execution_data_requester_test.go index 37fe1dac1a3..4f88beb6ab6 100644 --- a/module/state_synchronization/requester/execution_data_requester_test.go +++ b/module/state_synchronization/requester/execution_data_requester_test.go @@ -25,7 +25,7 @@ import ( "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" exedatamock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" - model2 "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" + executiondatamodel "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" @@ -76,7 +76,7 @@ type testExecutionDataServiceEntry struct { // When set (and fn is unset), this error will be returned for any calls to Get for this ED Err error // Otherwise, the execution data will be returned directly with no error - ExecutionData *model2.BlockExecutionData + ExecutionData *executiondatamodel.BlockExecutionData } type specialBlockGenerator func(int) map[uint64]testExecutionDataCallback @@ -86,12 +86,12 @@ type edTestRun struct { specialBlocks specialBlockGenerator } -type testExecutionDataCallback func(*model2.BlockExecutionData) (*model2.BlockExecutionData, error) +type testExecutionDataCallback func(*executiondatamodel.BlockExecutionData) (*executiondatamodel.BlockExecutionData, error) func mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) *exedatamock.Downloader { downloader := new(exedatamock.Downloader) - get := func(id flow.Identifier) (*model2.BlockExecutionData, error) { + get := func(id flow.Identifier) (*executiondatamodel.BlockExecutionData, error) { ed, has := edStore[id] // return not found @@ -115,7 +115,7 @@ func mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) downloader.On("Get", mock.Anything, mock.AnythingOfType("flow.Identifier")). 
Return( - func(ctx context.Context, id flow.Identifier) *model2.BlockExecutionData { + func(ctx context.Context, id flow.Identifier) *executiondatamodel.BlockExecutionData { ed, _ := get(id) return ed }, @@ -326,7 +326,7 @@ func generateBlocksWithSomeMissed(blockCount int) map[uint64]testExecutionDataCa failures := rand.Intn(3) + 1 attempts := 0 - missing[i] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { + missing[i] = func(ed *executiondatamodel.BlockExecutionData) (*executiondatamodel.BlockExecutionData, error) { if attempts < failures*2 { // this func is run twice for every attempt by the mock (once for ExecutionData one for errors) attempts++ // This should fail the first n fetch attempts @@ -349,7 +349,7 @@ func generateBlocksWithRandomDelays(blockCount int) map[uint64]testExecutionData continue } - delays[i] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { + delays[i] = func(ed *executiondatamodel.BlockExecutionData) (*executiondatamodel.BlockExecutionData, error) { time.Sleep(time.Duration(rand.Intn(25)) * time.Millisecond) return ed, nil } @@ -365,7 +365,7 @@ func generateBlocksWithHaltingError(blockCount int) (specialBlockGenerator, erro generate := func(int) map[uint64]testExecutionDataCallback { return map[uint64]testExecutionDataCallback{ - height: func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { + height: func(ed *executiondatamodel.BlockExecutionData) (*executiondatamodel.BlockExecutionData, error) { return nil, err }, } @@ -377,7 +377,7 @@ func generatePauseResume(pauseHeight uint64) (specialBlockGenerator, func()) { pause := make(chan struct{}) blocks := map[uint64]testExecutionDataCallback{} - blocks[pauseHeight] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { + blocks[pauseHeight] = func(ed *executiondatamodel.BlockExecutionData) (*executiondatamodel.BlockExecutionData, error) { <-pause return ed, nil } @@ -531,8 +531,8 @@ func (suite *ExecutionDataRequesterSuite) runRequesterTest(edr state_synchroniza return fetchedExecutionData } -func (suite *ExecutionDataRequesterSuite) consumeExecutionDataNotifications(cfg *fetchTestRun, done func(), fetchedExecutionData map[flow.Identifier]*model2.BlockExecutionData) func(ed *model2.BlockExecutionDataEntity) { - return func(ed *model2.BlockExecutionDataEntity) { +func (suite *ExecutionDataRequesterSuite) consumeExecutionDataNotifications(cfg *fetchTestRun, done func(), fetchedExecutionData map[flow.Identifier]*executiondatamodel.BlockExecutionData) func(ed *executiondatamodel.BlockExecutionDataEntity) { + return func(ed *executiondatamodel.BlockExecutionDataEntity) { if _, has := fetchedExecutionData[ed.BlockID]; has { suite.T().Errorf("duplicate execution data for block %s", ed.BlockID) return @@ -569,7 +569,7 @@ func (suite *ExecutionDataRequesterSuite) finalizeBlocks(cfg *fetchTestRun, foll } } -type receivedExecutionData map[flow.Identifier]*model2.BlockExecutionData +type receivedExecutionData map[flow.Identifier]*executiondatamodel.BlockExecutionData type fetchTestRun struct { sealedCount int startHeight uint64 @@ -579,14 +579,14 @@ type fetchTestRun struct { resultsByID map[flow.Identifier]*flow.ExecutionResult resultsByBlockID map[flow.Identifier]*flow.ExecutionResult sealsByBlockID map[flow.Identifier]*flow.Seal - executionDataByID map[flow.Identifier]*model2.BlockExecutionData + executionDataByID map[flow.Identifier]*executiondatamodel.BlockExecutionData executionDataEntries 
map[flow.Identifier]*testExecutionDataServiceEntry executionDataIDByBlockID map[flow.Identifier]flow.Identifier expectedIrrecoverable error stopHeight uint64 resumeHeight uint64 - fetchedExecutionData map[flow.Identifier]*model2.BlockExecutionData + fetchedExecutionData map[flow.Identifier]*executiondatamodel.BlockExecutionData waitTimeout time.Duration maxSearchAhead uint64 @@ -630,7 +630,7 @@ func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, speci resultsByID := map[flow.Identifier]*flow.ExecutionResult{} resultsByBlockID := map[flow.Identifier]*flow.ExecutionResult{} sealsByBlockID := map[flow.Identifier]*flow.Seal{} - executionDataByID := map[flow.Identifier]*model2.BlockExecutionData{} + executionDataByID := map[flow.Identifier]*executiondatamodel.BlockExecutionData{} executionDataIDByBlockID := map[flow.Identifier]flow.Identifier{} sealedCount := blockCount - 4 // seals for blocks 1-96 From d227c9cb09d3072d065a0d3a2559fa774191c35d Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 15:16:46 -0700 Subject: [PATCH 19/25] Revert "wip fixing import cycles" This reverts commit 11a741aebd125cc4c7c8bdaa466cab9c5a3259d9. --- .../execution_data_requester.go | 4 +-- .../requester/execution_data_requester.go | 4 +-- .../execution_data_requester_test.go | 30 +++++++++---------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/module/state_synchronization/execution_data_requester.go b/module/state_synchronization/execution_data_requester.go index 306d6e5b8d4..2a022a160f6 100644 --- a/module/state_synchronization/execution_data_requester.go +++ b/module/state_synchronization/execution_data_requester.go @@ -3,11 +3,11 @@ package state_synchronization import ( "github.com/onflow/flow-go/consensus/hotstuff/model" "github.com/onflow/flow-go/module/component" - executiondatamodel "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" + model2 "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" ) // OnExecutionDataReceivedConsumer is a callback that is called ExecutionData is received for a new block -type OnExecutionDataReceivedConsumer func(*executiondatamodel.BlockExecutionDataEntity) +type OnExecutionDataReceivedConsumer func(*model2.BlockExecutionDataEntity) // ExecutionDataRequester is a component that syncs ExecutionData from the network, and exposes // a callback that is called when a new ExecutionData is received diff --git a/module/state_synchronization/requester/execution_data_requester.go b/module/state_synchronization/requester/execution_data_requester.go index d1e55997d5c..a622b70b44a 100644 --- a/module/state_synchronization/requester/execution_data_requester.go +++ b/module/state_synchronization/requester/execution_data_requester.go @@ -17,7 +17,7 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" - executiondatamodel "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" + model2 "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/jobqueue" "github.com/onflow/flow-go/module/state_synchronization" @@ -441,7 +441,7 @@ func (e *executionDataRequester) processNotificationJob(ctx irrecoverable.Signal e.metrics.NotificationSent(entry.Height) } -func (e *executionDataRequester) notifyConsumers(executionData 
*executiondatamodel.BlockExecutionDataEntity) { +func (e *executionDataRequester) notifyConsumers(executionData *model2.BlockExecutionDataEntity) { e.consumerMu.RLock() defer e.consumerMu.RUnlock() diff --git a/module/state_synchronization/requester/execution_data_requester_test.go b/module/state_synchronization/requester/execution_data_requester_test.go index 4f88beb6ab6..37fe1dac1a3 100644 --- a/module/state_synchronization/requester/execution_data_requester_test.go +++ b/module/state_synchronization/requester/execution_data_requester_test.go @@ -25,7 +25,7 @@ import ( "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" exedatamock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" - executiondatamodel "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" + model2 "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" @@ -76,7 +76,7 @@ type testExecutionDataServiceEntry struct { // When set (and fn is unset), this error will be returned for any calls to Get for this ED Err error // Otherwise, the execution data will be returned directly with no error - ExecutionData *executiondatamodel.BlockExecutionData + ExecutionData *model2.BlockExecutionData } type specialBlockGenerator func(int) map[uint64]testExecutionDataCallback @@ -86,12 +86,12 @@ type edTestRun struct { specialBlocks specialBlockGenerator } -type testExecutionDataCallback func(*executiondatamodel.BlockExecutionData) (*executiondatamodel.BlockExecutionData, error) +type testExecutionDataCallback func(*model2.BlockExecutionData) (*model2.BlockExecutionData, error) func mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) *exedatamock.Downloader { downloader := new(exedatamock.Downloader) - get := func(id flow.Identifier) (*executiondatamodel.BlockExecutionData, error) { + get := func(id flow.Identifier) (*model2.BlockExecutionData, error) { ed, has := edStore[id] // return not found @@ -115,7 +115,7 @@ func mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) downloader.On("Get", mock.Anything, mock.AnythingOfType("flow.Identifier")). 
Return( - func(ctx context.Context, id flow.Identifier) *executiondatamodel.BlockExecutionData { + func(ctx context.Context, id flow.Identifier) *model2.BlockExecutionData { ed, _ := get(id) return ed }, @@ -326,7 +326,7 @@ func generateBlocksWithSomeMissed(blockCount int) map[uint64]testExecutionDataCa failures := rand.Intn(3) + 1 attempts := 0 - missing[i] = func(ed *executiondatamodel.BlockExecutionData) (*executiondatamodel.BlockExecutionData, error) { + missing[i] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { if attempts < failures*2 { // this func is run twice for every attempt by the mock (once for ExecutionData one for errors) attempts++ // This should fail the first n fetch attempts @@ -349,7 +349,7 @@ func generateBlocksWithRandomDelays(blockCount int) map[uint64]testExecutionData continue } - delays[i] = func(ed *executiondatamodel.BlockExecutionData) (*executiondatamodel.BlockExecutionData, error) { + delays[i] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { time.Sleep(time.Duration(rand.Intn(25)) * time.Millisecond) return ed, nil } @@ -365,7 +365,7 @@ func generateBlocksWithHaltingError(blockCount int) (specialBlockGenerator, erro generate := func(int) map[uint64]testExecutionDataCallback { return map[uint64]testExecutionDataCallback{ - height: func(ed *executiondatamodel.BlockExecutionData) (*executiondatamodel.BlockExecutionData, error) { + height: func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { return nil, err }, } @@ -377,7 +377,7 @@ func generatePauseResume(pauseHeight uint64) (specialBlockGenerator, func()) { pause := make(chan struct{}) blocks := map[uint64]testExecutionDataCallback{} - blocks[pauseHeight] = func(ed *executiondatamodel.BlockExecutionData) (*executiondatamodel.BlockExecutionData, error) { + blocks[pauseHeight] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { <-pause return ed, nil } @@ -531,8 +531,8 @@ func (suite *ExecutionDataRequesterSuite) runRequesterTest(edr state_synchroniza return fetchedExecutionData } -func (suite *ExecutionDataRequesterSuite) consumeExecutionDataNotifications(cfg *fetchTestRun, done func(), fetchedExecutionData map[flow.Identifier]*executiondatamodel.BlockExecutionData) func(ed *executiondatamodel.BlockExecutionDataEntity) { - return func(ed *executiondatamodel.BlockExecutionDataEntity) { +func (suite *ExecutionDataRequesterSuite) consumeExecutionDataNotifications(cfg *fetchTestRun, done func(), fetchedExecutionData map[flow.Identifier]*model2.BlockExecutionData) func(ed *model2.BlockExecutionDataEntity) { + return func(ed *model2.BlockExecutionDataEntity) { if _, has := fetchedExecutionData[ed.BlockID]; has { suite.T().Errorf("duplicate execution data for block %s", ed.BlockID) return @@ -569,7 +569,7 @@ func (suite *ExecutionDataRequesterSuite) finalizeBlocks(cfg *fetchTestRun, foll } } -type receivedExecutionData map[flow.Identifier]*executiondatamodel.BlockExecutionData +type receivedExecutionData map[flow.Identifier]*model2.BlockExecutionData type fetchTestRun struct { sealedCount int startHeight uint64 @@ -579,14 +579,14 @@ type fetchTestRun struct { resultsByID map[flow.Identifier]*flow.ExecutionResult resultsByBlockID map[flow.Identifier]*flow.ExecutionResult sealsByBlockID map[flow.Identifier]*flow.Seal - executionDataByID map[flow.Identifier]*executiondatamodel.BlockExecutionData + executionDataByID map[flow.Identifier]*model2.BlockExecutionData executionDataEntries 
map[flow.Identifier]*testExecutionDataServiceEntry executionDataIDByBlockID map[flow.Identifier]flow.Identifier expectedIrrecoverable error stopHeight uint64 resumeHeight uint64 - fetchedExecutionData map[flow.Identifier]*executiondatamodel.BlockExecutionData + fetchedExecutionData map[flow.Identifier]*model2.BlockExecutionData waitTimeout time.Duration maxSearchAhead uint64 @@ -630,7 +630,7 @@ func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, speci resultsByID := map[flow.Identifier]*flow.ExecutionResult{} resultsByBlockID := map[flow.Identifier]*flow.ExecutionResult{} sealsByBlockID := map[flow.Identifier]*flow.Seal{} - executionDataByID := map[flow.Identifier]*executiondatamodel.BlockExecutionData{} + executionDataByID := map[flow.Identifier]*model2.BlockExecutionData{} executionDataIDByBlockID := map[flow.Identifier]flow.Identifier{} sealedCount := blockCount - 4 // seals for blocks 1-96 From 4fc6d8fe62326ca3595ed5ea6c8721565c1c2d29 Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 15:15:46 -0700 Subject: [PATCH 20/25] Revert "wip fixing import cycles" This reverts commit 92b49339b6734c6162e4ba46c60aa450eb3afffc. --- .../node_builder/access_node_builder.go | 9 +- cmd/collection/main.go | 3 +- cmd/execution_builder.go | 3 +- cmd/observer/node_builder/observer_builder.go | 11 +- cmd/scaffold.go | 9 +- cmd/verification_builder.go | 3 +- engine/access/state_stream/backend.go | 7 +- .../state_stream/backend_executiondata.go | 5 +- .../backend_executiondata_test.go | 11 +- engine/access/state_stream/engine.go | 3 +- engine/access/state_stream/mock/api.go | 2 +- .../mock/get_execution_data_func.go | 3 +- engine/common/rpc/convert/execution_data.go | 16 +- .../common/rpc/convert/execution_data_test.go | 8 +- engine/execution/block_result.go | 10 +- .../computation/computer/result_collector.go | 4 +- .../retryable_uploader_wrapper_test.go | 10 +- engine/execution/state/unittest/fixtures.go | 3 +- engine/execution/testutil/fixtures.go | 6 +- follower/follower_builder.go | 11 +- insecure/cmd/corrupted_builder.go | 4 +- insecure/corruptlibp2p/pubsub_adapter.go | 2 +- module/chunks/chunkVerifier.go | 3 +- module/chunks/chunkVerifier_test.go | 5 +- .../execution_data/cache/cache.go | 11 +- .../execution_data/downloader.go | 13 +- .../execution_data/downloader_test.go | 3 +- .../execution_data/{model => }/entity.go | 2 +- .../{model => }/execution_data.go | 2 +- .../execution_data/mock/downloader.go | 2 +- .../mock/execution_data_store.go | 2 +- .../execution_data/serializer.go | 5 +- .../executiondatasync/execution_data/store.go | 21 +- .../execution_data/store_test.go | 25 +- .../executiondatasync/execution_data/util.go | 3 +- module/executiondatasync/provider/provider.go | 15 +- .../provider/provider_test.go | 15 +- module/mempool/execution_data.go | 8 +- module/mempool/herocache/backdata/cache.go | 7 - module/mempool/herocache/execution_data.go | 14 +- .../mempool/herocache/execution_data_test.go | 6 +- module/mempool/mock/execution_data.go | 2 +- module/metrics/access.go | 37 ++- module/metrics/alsp.go | 5 +- module/metrics/badger.go | 26 +- module/metrics/bitswap.go | 38 ++- module/metrics/cache.go | 17 +- module/metrics/chainsync.go | 25 +- module/metrics/cleaner.go | 6 +- module/metrics/collection.go | 15 +- module/metrics/compliance.go | 65 ++-- module/metrics/consensus.go | 17 +- module/metrics/cruisectl.go | 22 +- module/metrics/engine.go | 21 +- module/metrics/execution.go | 281 +++++++++--------- module/metrics/execution_data_requester.go | 
29 +- module/metrics/execution_data_sync.go | 66 ++-- module/metrics/gossipsub.go | 37 ++- .../gossipsub_rpc_validation_inspector.go | 17 +- module/metrics/gossipsub_score.go | 37 ++- module/metrics/herocache.go | 131 +++++++- module/metrics/hotstuff.go | 73 +++-- module/metrics/internal/namespaces.go | 111 ------- module/metrics/libp2p_resource_manager.go | 59 ++-- module/metrics/loader.go | 12 +- module/metrics/mempool.go | 5 +- module/metrics/namespaces.go | 111 +++++++ module/metrics/network.go | 79 +++-- module/metrics/network/herocache.go | 131 -------- module/metrics/observer.go | 6 +- module/metrics/ping.go | 13 +- module/metrics/rate_limited_blockstore.go | 5 +- module/metrics/rest_api.go | 17 +- module/metrics/transaction.go | 41 ++- module/metrics/unicast_manager.go | 25 +- module/metrics/verification.go | 69 +++-- .../execution_data_requester.go | 4 +- .../requester/distributer.go | 4 +- .../requester/execution_data_requester.go | 3 +- .../execution_data_requester_test.go | 29 +- .../requester/jobs/execution_data_reader.go | 4 +- .../jobs/execution_data_reader_test.go | 10 +- network/alsp/manager/manager.go | 5 +- network/errors.go | 2 +- network/internal/p2putils/utils.go | 2 +- network/p2p/blob/blob_service.go | 2 +- network/p2p/cache/gossipsub_spam_records.go | 2 +- network/p2p/connection/connection_gater.go | 2 +- .../p2p/connection/connection_gater_test.go | 2 +- network/p2p/connection/connector.go | 2 +- .../p2p/connection/internal/loggerNotifiee.go | 2 +- network/p2p/connection/peerManager.go | 2 +- network/p2p/dht/dht.go | 2 +- .../control_message_validation_inspector.go | 2 +- .../p2pbuilder/gossipsub/gossipSubBuilder.go | 9 +- network/p2p/p2pbuilder/utils.go | 2 +- .../p2p}/p2plogging/internal/peerIdCache.go | 8 +- .../p2plogging/internal/peerIdCache_test.go | 11 +- {utils => network/p2p}/p2plogging/logging.go | 2 +- .../p2p}/p2plogging/logging_test.go | 2 +- network/p2p/p2pnet/network.go | 2 +- network/p2p/p2pnode/gossipsubMetrics.go | 2 +- network/p2p/p2pnode/libp2pNode.go | 2 +- network/p2p/p2pnode/libp2pNode_test.go | 2 +- network/p2p/p2pnode/protocolPeerCache.go | 2 +- network/p2p/scoring/registry.go | 2 +- network/p2p/test/topic_validator_test.go | 2 +- network/p2p/tracer/gossipSubMeshTracer.go | 7 +- network/p2p/tracer/gossipSubScoreTracer.go | 2 +- .../identity_provider_translator.go | 2 +- network/p2p/unicast/errors.go | 2 +- network/p2p/unicast/manager.go | 2 +- network/p2p/unicast/stream/errors.go | 2 +- network/test/unicast_authorization_test.go | 2 +- .../validator/authorized_sender_validator.go | 2 +- network/validator/pubsub/topic_validator.go | 2 +- utils/unittest/fixtures.go | 35 ++- 117 files changed, 1031 insertions(+), 1112 deletions(-) rename module/executiondatasync/execution_data/{model => }/entity.go (97%) rename module/executiondatasync/execution_data/{model => }/execution_data.go (97%) delete mode 100644 module/metrics/internal/namespaces.go create mode 100644 module/metrics/namespaces.go delete mode 100644 module/metrics/network/herocache.go rename {utils => network/p2p}/p2plogging/internal/peerIdCache.go (90%) rename {utils => network/p2p}/p2plogging/internal/peerIdCache_test.go (90%) rename {utils => network/p2p}/p2plogging/logging.go (92%) rename {utils => network/p2p}/p2plogging/logging_test.go (96%) diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 9591eb1c99b..d5b0e688cd4 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ 
b/cmd/access/node_builder/access_node_builder.go @@ -58,7 +58,6 @@ import ( "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/module/metrics/unstaked" "github.com/onflow/flow-go/module/state_synchronization" edrequester "github.com/onflow/flow-go/module/state_synchronization/requester" @@ -368,7 +367,7 @@ func (builder *FlowAccessNodeBuilder) buildFollowerEngine() *FlowAccessNodeBuild builder.Component("follower engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer) } core, err := followereng.NewComplianceCore( @@ -1253,9 +1252,9 @@ func (builder *FlowAccessNodeBuilder) enqueuePublicNetworkInit() { msgValidators := publicNetworkMsgValidators(node.Logger.With().Bool("public", true).Logger(), node.IdentityProvider, builder.NodeID) receiveCache := netcache.NewHeroReceiveCache(builder.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, builder.Logger, - networkmetrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) + metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) - err := node.Metrics.Mempool.Register(networkmetrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) + err := node.Metrics.Mempool.Register(metrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) if err != nil { return nil, fmt.Errorf("could not register networking receive cache metric: %w", err) } @@ -1360,7 +1359,7 @@ func (builder *FlowAccessNodeBuilder) initPublicLibp2pNode(networkKey crypto.Pri }, &p2p.DisallowListCacheConfig{ MaxSize: builder.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, - Metrics: networkmetrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), + Metrics: metrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), }, meshTracer). SetBasicResolver(builder.Resolver). 
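Most of the node-builder hunks in this revert repeat one guard: hero cache metrics are opt-in, defaulting to a no-op collector and only switching to a registerer-backed collector when the flag is set. The pattern, extracted from the `follower engine` components above and below:

```go
package example

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/onflow/flow-go/module"
	"github.com/onflow/flow-go/module/metrics"
)

// followerCacheMetrics mirrors the guard repeated across the builders: the
// collector defaults to a no-op and is only replaced by a real, Prometheus
// registerer-backed collector when hero cache metrics are enabled.
func followerCacheMetrics(enabled bool, registerer prometheus.Registerer) module.HeroCacheMetrics {
	var collector module.HeroCacheMetrics = metrics.NewNoopCollector()
	if enabled {
		collector = metrics.FollowerCacheMetrics(registerer)
	}
	return collector
}
```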
diff --git a/cmd/collection/main.go b/cmd/collection/main.go index 2368a43cbf1..f285911bfdd 100644 --- a/cmd/collection/main.go +++ b/cmd/collection/main.go @@ -46,7 +46,6 @@ import ( "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" @@ -319,7 +318,7 @@ func main() { var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer) } core, err := followereng.NewComplianceCore( diff --git a/cmd/execution_builder.go b/cmd/execution_builder.go index 05542dd8546..8aa1e46884a 100644 --- a/cmd/execution_builder.go +++ b/cmd/execution_builder.go @@ -74,7 +74,6 @@ import ( finalizer "github.com/onflow/flow-go/module/finalizer/consensus" "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p/blob" @@ -979,7 +978,7 @@ func (exeNode *ExecutionNode) LoadFollowerEngine( var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer) } core, err := followereng.NewComplianceCore( diff --git a/cmd/observer/node_builder/observer_builder.go b/cmd/observer/node_builder/observer_builder.go index 44e33b4a2bb..ff6e5ff6da5 100644 --- a/cmd/observer/node_builder/observer_builder.go +++ b/cmd/observer/node_builder/observer_builder.go @@ -47,7 +47,6 @@ import ( "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/local" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/metrics/network" consensus_follower "github.com/onflow/flow-go/module/upstream" "github.com/onflow/flow-go/network" alspmgr "github.com/onflow/flow-go/network/alsp/manager" @@ -61,6 +60,7 @@ import ( "github.com/onflow/flow-go/network/p2p/keyutils" "github.com/onflow/flow-go/network/p2p/p2pbuilder" p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet" "github.com/onflow/flow-go/network/p2p/subscription" "github.com/onflow/flow-go/network/p2p/tracer" @@ -75,7 +75,6 @@ import ( "github.com/onflow/flow-go/state/protocol/events/gadgets" "github.com/onflow/flow-go/utils/grpcutils" "github.com/onflow/flow-go/utils/io" - "github.com/onflow/flow-go/utils/p2plogging" ) // ObserverBuilder extends cmd.NodeBuilder and declares additional functions needed to bootstrap an Access node @@ -349,7 +348,7 @@ func (builder *ObserverServiceBuilder) buildFollowerEngine() *ObserverServiceBui builder.Component("follower engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = 
metrics.FollowerCacheMetrics(node.MetricsRegisterer) } packer := hotsignature.NewConsensusSigDataPacker(builder.Committee) verifier := verification.NewCombinedVerifier(builder.Committee, packer) // verifier for HotStuff signature constructs (QCs, TCs, votes) @@ -719,7 +718,7 @@ func (builder *ObserverServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr p2pconfig.PeerManagerDisableConfig(), // disable peer manager for observer node. &p2p.DisallowListCacheConfig{ MaxSize: builder.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, - Metrics: networkmetrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), + Metrics: metrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork), }, meshTracer). SetSubscriptionFilter( @@ -795,9 +794,9 @@ func (builder *ObserverServiceBuilder) enqueuePublicNetworkInit() { Component("public network", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) { receiveCache := netcache.NewHeroReceiveCache(builder.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, builder.Logger, - networkmetrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) + metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork)) - err := node.Metrics.Mempool.Register(networkmetrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) + err := node.Metrics.Mempool.Register(metrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size) if err != nil { return nil, fmt.Errorf("could not register networking receive cache metric: %w", err) } diff --git a/cmd/scaffold.go b/cmd/scaffold.go index 99c1a627baa..ebfba2614c1 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -39,7 +39,6 @@ import ( "github.com/onflow/flow-go/module/local" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/module/profiler" "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/module/updatable_configs" @@ -245,8 +244,8 @@ func (fnb *FlowNodeBuilder) EnqueueResolver() { var dnsIpCacheMetricsCollector module.HeroCacheMetrics = metrics.NewNoopCollector() var dnsTxtCacheMetricsCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if fnb.HeroCacheMetricsEnable { - dnsIpCacheMetricsCollector = networkmetrics.NetworkDnsIpCacheMetricsFactory(fnb.MetricsRegisterer) - dnsTxtCacheMetricsCollector = networkmetrics.NetworkDnsTxtCacheMetricsFactory(fnb.MetricsRegisterer) + dnsIpCacheMetricsCollector = metrics.NetworkDnsIpCacheMetricsFactory(fnb.MetricsRegisterer) + dnsTxtCacheMetricsCollector = metrics.NetworkDnsTxtCacheMetricsFactory(fnb.MetricsRegisterer) } cache := herocache.NewDNSCache( @@ -366,7 +365,7 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() { &fnb.FlowConfig.NetworkConfig.ConnectionManagerConfig, &p2p.DisallowListCacheConfig{ MaxSize: fnb.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize, - Metrics: networkmetrics.DisallowListCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork), + Metrics: metrics.DisallowListCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork), }) if err != nil { @@ -438,7 +437,7 @@ func (fnb *FlowNodeBuilder) InitFlowNetworkWithConduitFactory( receiveCache := netcache.NewHeroReceiveCache(fnb.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize, fnb.Logger, - 
networkmetrics.NetworkReceiveCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork)) + metrics.NetworkReceiveCacheMetricsFactory(fnb.HeroCacheMetricsFactory(), network.PrivateNetwork)) err := node.Metrics.Mempool.Register(metrics.ResourceNetworkingReceiveCache, receiveCache.Size) if err != nil { diff --git a/cmd/verification_builder.go b/cmd/verification_builder.go index af9b265678c..ea5ddf65a8e 100644 --- a/cmd/verification_builder.go +++ b/cmd/verification_builder.go @@ -33,7 +33,6 @@ import ( "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/state/protocol" badgerState "github.com/onflow/flow-go/state/protocol/badger" "github.com/onflow/flow-go/state/protocol/blocktimer" @@ -360,7 +359,7 @@ func (v *VerificationNodeBuilder) LoadComponentsAndModules() { var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector() if node.HeroCacheMetricsEnable { - heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer) + heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer) } core, err := followereng.NewComplianceCore( diff --git a/engine/access/state_stream/backend.go b/engine/access/state_stream/backend.go index 324da01400b..33c5e18cb77 100644 --- a/engine/access/state_stream/backend.go +++ b/engine/access/state_stream/backend.go @@ -15,7 +15,6 @@ import ( "github.com/onflow/flow-go/module/counters" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" - "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" ) @@ -36,11 +35,11 @@ const ( DefaultResponseLimit = float64(0) ) -type GetExecutionDataFunc func(context.Context, uint64) (*model.BlockExecutionDataEntity, error) +type GetExecutionDataFunc func(context.Context, uint64) (*execution_data.BlockExecutionDataEntity, error) type GetStartHeightFunc func(flow.Identifier, uint64) (uint64, error) type API interface { - GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*model.BlockExecutionData, error) + GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionData, error) SubscribeExecutionData(ctx context.Context, startBlockID flow.Identifier, startBlockHeight uint64) Subscription SubscribeEvents(ctx context.Context, startBlockID flow.Identifier, startHeight uint64, filter EventFilter) Subscription } @@ -127,7 +126,7 @@ func New( // getExecutionData returns the execution data for the given block height. // Expected errors during normal operation: // - storage.ErrNotFound or execution_data.BlobNotFoundError: execution data for the given block height is not available. -func (b *StateStreamBackend) getExecutionData(ctx context.Context, height uint64) (*model.BlockExecutionDataEntity, error) { +func (b *StateStreamBackend) getExecutionData(ctx context.Context, height uint64) (*execution_data.BlockExecutionDataEntity, error) { // fail early if no notification has been received for the given block height. // note: it's possible for the data to exist in the data store before the notification is // received. this ensures a consistent view is available to all streams. 
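The `backend.go` hunk above documents the fail-early behavior of `getExecutionData`: data may already sit in the local store before the height notification arrives, and serving it early would give different streams different views. A minimal sketch of that guard, where the cache type, its `ByHeight` method, and the `highestNotified` parameter are assumptions not shown in this diff:

```go
package example

import (
	"context"
	"fmt"

	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
	"github.com/onflow/flow-go/module/executiondatasync/execution_data/cache"
	"github.com/onflow/flow-go/storage"
)

// getExecutionData rejects heights above the highest-notified height with
// storage.ErrNotFound even if the blob already exists locally, so that all
// streams observe the same consistent frontier.
func getExecutionData(
	ctx context.Context,
	execDataCache *cache.ExecutionDataCache, // assumed cache type
	highestNotified uint64, // highest height for which a notification fired
	height uint64,
) (*execution_data.BlockExecutionDataEntity, error) {
	if height > highestNotified {
		return nil, fmt.Errorf("execution data for height %d is not available yet: %w", height, storage.ErrNotFound)
	}
	return execDataCache.ByHeight(ctx, height)
}
```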
diff --git a/engine/access/state_stream/backend_executiondata.go b/engine/access/state_stream/backend_executiondata.go index 40e48be4003..0443c6ba9ba 100644 --- a/engine/access/state_stream/backend_executiondata.go +++ b/engine/access/state_stream/backend_executiondata.go @@ -14,13 +14,12 @@ import ( "github.com/onflow/flow-go/engine/common/rpc" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/executiondatasync/execution_data" - "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/storage" ) type ExecutionDataResponse struct { Height uint64 - ExecutionData *model.BlockExecutionData + ExecutionData *execution_data.BlockExecutionData } type ExecutionDataBackend struct { @@ -35,7 +34,7 @@ type ExecutionDataBackend struct { getStartHeight GetStartHeightFunc } -func (b *ExecutionDataBackend) GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*model.BlockExecutionData, error) { +func (b *ExecutionDataBackend) GetExecutionDataByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionData, error) { header, err := b.headers.ByBlockID(blockID) if err != nil { return nil, fmt.Errorf("could not get block header for %s: %w", blockID, err) diff --git a/engine/access/state_stream/backend_executiondata_test.go b/engine/access/state_stream/backend_executiondata_test.go index 524a304a942..361cb64aa80 100644 --- a/engine/access/state_stream/backend_executiondata_test.go +++ b/engine/access/state_stream/backend_executiondata_test.go @@ -20,7 +20,6 @@ import ( "github.com/onflow/flow-go/module/blobs" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" - "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" protocolmock "github.com/onflow/flow-go/state/protocol/mock" @@ -54,7 +53,7 @@ type BackendExecutionDataSuite struct { blocks []*flow.Block blockEvents map[flow.Identifier]flow.EventsList - execDataMap map[flow.Identifier]*model.BlockExecutionDataEntity + execDataMap map[flow.Identifier]*execution_data.BlockExecutionDataEntity blockMap map[uint64]*flow.Block sealMap map[flow.Identifier]*flow.Seal resultMap map[flow.Identifier]*flow.ExecutionResult @@ -90,7 +89,7 @@ func (s *BackendExecutionDataSuite) SetupTest() { var err error blockCount := 5 - s.execDataMap = make(map[flow.Identifier]*model.BlockExecutionDataEntity, blockCount) + s.execDataMap = make(map[flow.Identifier]*execution_data.BlockExecutionDataEntity, blockCount) s.blockEvents = make(map[flow.Identifier]flow.EventsList, blockCount) s.blockMap = make(map[uint64]*flow.Block, blockCount) s.sealMap = make(map[flow.Identifier]*flow.Seal, blockCount) @@ -114,7 +113,7 @@ func (s *BackendExecutionDataSuite) SetupTest() { blockEvents := unittest.BlockEventsFixture(block.Header, (i%len(testEventTypes))*3+1, testEventTypes...) 
numChunks := 5 - chunkDatas := make([]*model.ChunkExecutionData, 0, numChunks) + chunkDatas := make([]*execution_data.ChunkExecutionData, 0, numChunks) for i := 0; i < numChunks; i++ { var events flow.EventsList switch { @@ -125,7 +124,7 @@ func (s *BackendExecutionDataSuite) SetupTest() { default: events = flow.EventsList{blockEvents.Events[i]} } - chunkDatas = append(chunkDatas, unittest.ChunkExecutionDataFixture(s.T(), model.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events))) + chunkDatas = append(chunkDatas, unittest.ChunkExecutionDataFixture(s.T(), execution_data.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events))) } execData := unittest.BlockExecutionDataFixture( unittest.WithBlockExecutionDataBlockID(block.ID()), @@ -136,7 +135,7 @@ func (s *BackendExecutionDataSuite) SetupTest() { assert.NoError(s.T(), err) s.blocks = append(s.blocks, block) - s.execDataMap[block.ID()] = model.NewBlockExecutionDataEntity(result.ExecutionDataID, execData) + s.execDataMap[block.ID()] = execution_data.NewBlockExecutionDataEntity(result.ExecutionDataID, execData) s.blockEvents[block.ID()] = blockEvents.Events s.blockMap[block.Header.Height] = block s.sealMap[block.ID()] = seal diff --git a/engine/access/state_stream/engine.go b/engine/access/state_stream/engine.go index 64d67e2f65c..cb3a3e73813 100644 --- a/engine/access/state_stream/engine.go +++ b/engine/access/state_stream/engine.go @@ -12,7 +12,6 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" - "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/grpcserver" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/state/protocol" @@ -131,7 +130,7 @@ func NewEng( // The caller must guarantee that execution data is locally available for all blocks with // heights between the initialBlockHeight provided during startup and the block height of // the execution data provided. 
-func (e *Engine) OnExecutionData(executionData *model.BlockExecutionDataEntity) {
+func (e *Engine) OnExecutionData(executionData *execution_data.BlockExecutionDataEntity) {
 	lg := e.log.With().Hex("block_id", logging.ID(executionData.BlockID)).Logger()
 
 	lg.Trace().Msg("received execution data")
diff --git a/engine/access/state_stream/mock/api.go b/engine/access/state_stream/mock/api.go
index c4779c1f24c..5b57efc917f 100644
--- a/engine/access/state_stream/mock/api.go
+++ b/engine/access/state_stream/mock/api.go
@@ -6,7 +6,7 @@ import (
 	context "context"
 
 	flow "github.com/onflow/flow-go/model/flow"
-	execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data"
 
 	mock "github.com/stretchr/testify/mock"
diff --git a/engine/access/state_stream/mock/get_execution_data_func.go b/engine/access/state_stream/mock/get_execution_data_func.go
index ee872a740a9..50fe8087e21 100644
--- a/engine/access/state_stream/mock/get_execution_data_func.go
+++ b/engine/access/state_stream/mock/get_execution_data_func.go
@@ -5,8 +5,7 @@ package mock
 import (
 	context "context"
 
-	execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
-
+	execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	mock "github.com/stretchr/testify/mock"
 )
diff --git a/engine/common/rpc/convert/execution_data.go b/engine/common/rpc/convert/execution_data.go
index 100a00c37fe..21d2297e16a 100644
--- a/engine/common/rpc/convert/execution_data.go
+++ b/engine/common/rpc/convert/execution_data.go
@@ -10,11 +10,11 @@ import (
 
 	"github.com/onflow/flow-go/ledger"
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 )
 
 // BlockExecutionDataToMessage converts a BlockExecutionData to a protobuf message
-func BlockExecutionDataToMessage(data *model.BlockExecutionData) (
+func BlockExecutionDataToMessage(data *execution_data.BlockExecutionData) (
 	*entities.BlockExecutionData,
 	error,
 ) {
@@ -36,11 +36,11 @@ func BlockExecutionDataToMessage(data *model.BlockExecutionData) (
 func MessageToBlockExecutionData(
 	m *entities.BlockExecutionData,
 	chain flow.Chain,
-) (*model.BlockExecutionData, error) {
+) (*execution_data.BlockExecutionData, error) {
 	if m == nil {
 		return nil, ErrEmptyMessage
 	}
 
-	chunks := make([]*model.ChunkExecutionData, len(m.ChunkExecutionData))
+	chunks := make([]*execution_data.ChunkExecutionData, len(m.ChunkExecutionData))
 	for i, chunk := range m.GetChunkExecutionData() {
 		convertedChunk, err := MessageToChunkExecutionData(chunk, chain)
 		if err != nil {
@@ -49,14 +49,14 @@ func MessageToBlockExecutionData(
 		chunks[i] = convertedChunk
 	}
 
-	return &model.BlockExecutionData{
+	return &execution_data.BlockExecutionData{
 		BlockID:             MessageToIdentifier(m.GetBlockId()),
 		ChunkExecutionDatas: chunks,
 	}, nil
 }
 
 // ChunkExecutionDataToMessage converts a ChunkExecutionData to a protobuf message
-func ChunkExecutionDataToMessage(data *model.ChunkExecutionData) (
+func ChunkExecutionDataToMessage(data *execution_data.ChunkExecutionData) (
 	*entities.ChunkExecutionData,
 	error,
 ) {
@@ -88,7 +88,7 @@ func ChunkExecutionDataToMessage(data *model.ChunkExecutionData) (
 func MessageToChunkExecutionData(
 	m *entities.ChunkExecutionData,
 	chain flow.Chain,
-) (*model.ChunkExecutionData, error) {
+) (*execution_data.ChunkExecutionData, error) {
 	collection, err := messageToTrustedCollection(m.GetCollection(), chain)
 	if err != nil {
 		return nil, err
@@ -107,7 +107,7 @@ func MessageToChunkExecutionData(
 		events = nil
 	}
 
-	return &model.ChunkExecutionData{
+	return &execution_data.ChunkExecutionData{
 		Collection: collection,
 		Events:     events,
 		TrieUpdate: trieUpdate,
diff --git a/engine/common/rpc/convert/execution_data_test.go b/engine/common/rpc/convert/execution_data_test.go
index 73ba51a2ce2..59c136c5b8a 100644
--- a/engine/common/rpc/convert/execution_data_test.go
+++ b/engine/common/rpc/convert/execution_data_test.go
@@ -9,7 +9,7 @@ import (
 	"github.com/onflow/flow-go/engine/common/rpc/convert"
 	"github.com/onflow/flow-go/ledger/common/testutils"
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
@@ -20,7 +20,7 @@ func TestConvertBlockExecutionData(t *testing.T) {
 	events := unittest.EventsFixture(5)
 
 	chunks := 5
-	chunkData := make([]*model.ChunkExecutionData, 0, chunks)
+	chunkData := make([]*execution_data.ChunkExecutionData, 0, chunks)
 
 	for i := 0; i < chunks-1; i++ {
 		ced := unittest.ChunkExecutionDataFixture(t,
 			0, // updates set explicitly to target 160-320KB per chunk
@@ -29,7 +29,7 @@ func TestConvertBlockExecutionData(t *testing.T) {
 		)
 		chunkData = append(chunkData, ced)
 	}
 
-	makeServiceTx := func(ced *model.ChunkExecutionData) {
+	makeServiceTx := func(ced *execution_data.ChunkExecutionData) {
 		// proposal key and payer are empty addresses for service tx
 		collection := unittest.CollectionFixture(1)
 		collection.Transactions[0].ProposalKey.Address = flow.EmptyAddress
@@ -39,7 +39,7 @@ func TestConvertBlockExecutionData(t *testing.T) {
 		// the service chunk sometimes does not have any trie updates
 		ced.TrieUpdate = nil
 	}
-	chunk := unittest.ChunkExecutionDataFixture(t, model.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events), makeServiceTx)
+	chunk := unittest.ChunkExecutionDataFixture(t, execution_data.DefaultMaxBlobSize/5, unittest.WithChunkEvents(events), makeServiceTx)
 	chunkData = append(chunkData, chunk)
 
 	blockData := unittest.BlockExecutionDataFixture(unittest.WithChunkExecutionDatas(chunkData...))
diff --git a/engine/execution/block_result.go b/engine/execution/block_result.go
index 30e999d2047..cdb6e3d54f2 100644
--- a/engine/execution/block_result.go
+++ b/engine/execution/block_result.go
@@ -3,7 +3,7 @@ package execution
 import (
 	"github.com/onflow/flow-go/fvm/storage/snapshot"
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	"github.com/onflow/flow-go/module/mempool/entity"
 )
 
@@ -104,7 +104,7 @@ type BlockAttestationResult struct {
 	// should be available as part of computation result and most likely trieUpdate
 	// was the reason this is kept here, long term we don't need this data and should
 	// act based on register deltas
-	*model.BlockExecutionData
+	*execution_data.BlockExecutionData
 }
 
 func NewEmptyBlockAttestationResult(
@@ -114,10 +114,10 @@ func NewEmptyBlockAttestationResult(
 	return &BlockAttestationResult{
 		BlockExecutionResult:         blockExecutionResult,
 		collectionAttestationResults: make([]CollectionAttestationResult, 0, colSize),
-		BlockExecutionData: &model.BlockExecutionData{
+		BlockExecutionData: &execution_data.BlockExecutionData{
 			BlockID: blockExecutionResult.ID(),
 			ChunkExecutionDatas: make(
-				[]*model.ChunkExecutionData,
+				[]*execution_data.ChunkExecutionData,
 				0,
 				colSize),
 		},
@@ -137,7 +137,7 @@ func (ar *BlockAttestationResult) AppendCollectionAttestationResult(
 	endStateCommit flow.StateCommitment,
 	stateProof flow.StorageProof,
 	eventCommit flow.Identifier,
-	chunkExecutionDatas *model.ChunkExecutionData,
+	chunkExecutionDatas *execution_data.ChunkExecutionData,
 ) {
 	ar.collectionAttestationResults = append(ar.collectionAttestationResults,
 		CollectionAttestationResult{
diff --git a/engine/execution/computation/computer/result_collector.go b/engine/execution/computation/computer/result_collector.go
index 564a78df438..8f5631252c2 100644
--- a/engine/execution/computation/computer/result_collector.go
+++ b/engine/execution/computation/computer/result_collector.go
@@ -19,7 +19,7 @@ import (
 	"github.com/onflow/flow-go/ledger"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	"github.com/onflow/flow-go/module/executiondatasync/provider"
 	"github.com/onflow/flow-go/module/mempool/entity"
 	"github.com/onflow/flow-go/module/trace"
@@ -156,7 +156,7 @@ func (collector *resultCollector) commitCollection(
 	}
 
 	col := collection.Collection()
-	chunkExecData := &model.ChunkExecutionData{
+	chunkExecData := &execution_data.ChunkExecutionData{
 		Collection: &col,
 		Events:     events,
 		TrieUpdate: trieUpdate,
diff --git a/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go b/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go
index 702ffcb6339..491307705eb 100644
--- a/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go
+++ b/engine/execution/ingestion/uploader/retryable_uploader_wrapper_test.go
@@ -9,8 +9,8 @@ import (
 
 	"github.com/onflow/flow-go/ledger"
 	"github.com/onflow/flow-go/model/flow"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	executionDataMock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/module/mempool/entity"
 	"github.com/onflow/flow-go/module/metrics"
 
@@ -112,7 +112,7 @@ func Test_ReconstructComputationResultFromStorage(t *testing.T) {
 	testTrieUpdate := &ledger.TrieUpdate{
 		RootHash: testTrieUpdateRootHash,
 	}
-	testChunkExecutionDatas := []*model.ChunkExecutionData{
+	testChunkExecutionDatas := []*execution_data.ChunkExecutionData{
 		{
 			TrieUpdate: testTrieUpdate,
 		},
@@ -170,7 +170,7 @@ func Test_ReconstructComputationResultFromStorage(t *testing.T) {
 	mockExecutionDataDowloader := new(executionDataMock.Downloader)
 	mockExecutionDataDowloader.On("Get", mock.Anything, testEDID).Return(
-		&model.BlockExecutionData{
+		&execution_data.BlockExecutionData{
 			BlockID:             testBlockID,
 			ChunkExecutionDatas: testChunkExecutionDatas,
 		}, nil)
@@ -260,9 +260,9 @@ func createTestBadgerRetryableUploaderWrapper(asyncUploader *AsyncUploader) *Bad
 	mockExecutionDataDowloader := new(executionDataMock.Downloader)
 	mockExecutionDataDowloader.On("Get", mock.Anything, mock.Anything).Return(
-		&model.BlockExecutionData{
+		&execution_data.BlockExecutionData{
 			BlockID:             flow.ZeroID,
-			ChunkExecutionDatas: make([]*model.ChunkExecutionData, 0),
+			ChunkExecutionDatas: make([]*execution_data.ChunkExecutionData, 0),
 		}, nil)
 
 	return NewBadgerRetryableUploaderWrapper(
diff --git a/engine/execution/state/unittest/fixtures.go b/engine/execution/state/unittest/fixtures.go
index dbe29e25576..117f9e7ed19 100644
--- a/engine/execution/state/unittest/fixtures.go
+++ b/engine/execution/state/unittest/fixtures.go
@@ -12,7 +12,6 @@ import (
 	"github.com/onflow/flow-go/fvm/storage/snapshot"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/module/mempool/entity"
 	"github.com/onflow/flow-go/utils/unittest"
 )
@@ -46,7 +45,7 @@ func ComputationResultForBlockFixture(
 	computationResult := execution.NewEmptyComputationResult(completeBlock)
 
 	numberOfChunks := len(collections) + 1
-	ceds := make([]*model.ChunkExecutionData, numberOfChunks)
+	ceds := make([]*execution_data.ChunkExecutionData, numberOfChunks)
 	for i := 0; i < numberOfChunks; i++ {
 		ceds[i] = unittest.ChunkExecutionDataFixture(t, 1024)
 		computationResult.CollectionExecutionResultAt(i).UpdateExecutionSnapshot(StateInteractionsFixture())
diff --git a/engine/execution/testutil/fixtures.go b/engine/execution/testutil/fixtures.go
index 8e10da57708..3113f2df9af 100644
--- a/engine/execution/testutil/fixtures.go
+++ b/engine/execution/testutil/fixtures.go
@@ -26,7 +26,7 @@ import (
 	"github.com/onflow/flow-go/ledger/complete"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/epochs"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	"github.com/onflow/flow-go/state/protocol"
 	protocolMock "github.com/onflow/flow-go/state/protocol/mock"
 	"github.com/onflow/flow-go/utils/unittest"
@@ -610,8 +610,8 @@ func ComputationResultFixture(t *testing.T) *execution.ComputationResult {
 	return &execution.ComputationResult{
 		BlockExecutionResult: blockExecResult,
 		BlockAttestationResult: &execution.BlockAttestationResult{
-			BlockExecutionData: &model.BlockExecutionData{
-				ChunkExecutionDatas: []*model.ChunkExecutionData{
+			BlockExecutionData: &execution_data.BlockExecutionData{
+				ChunkExecutionDatas: []*execution_data.ChunkExecutionData{
 					{TrieUpdate: trieUpdate1},
 					{TrieUpdate: trieUpdate2},
 					{TrieUpdate: trieUpdate3},
diff --git a/follower/follower_builder.go b/follower/follower_builder.go
index 01c7c0a7610..2b32040c5b1 100644
--- a/follower/follower_builder.go
+++ b/follower/follower_builder.go
@@ -36,7 +36,6 @@ import (
 	"github.com/onflow/flow-go/module/id"
 	"github.com/onflow/flow-go/module/local"
 	"github.com/onflow/flow-go/module/metrics"
-	"github.com/onflow/flow-go/module/metrics/network"
 	"github.com/onflow/flow-go/module/upstream"
 	"github.com/onflow/flow-go/network"
 	alspmgr "github.com/onflow/flow-go/network/alsp/manager"
@@ -51,6 +50,7 @@ import (
 	"github.com/onflow/flow-go/network/p2p/keyutils"
 	"github.com/onflow/flow-go/network/p2p/p2pbuilder"
 	p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config"
+	"github.com/onflow/flow-go/network/p2p/p2plogging"
 	"github.com/onflow/flow-go/network/p2p/p2pnet"
 	"github.com/onflow/flow-go/network/p2p/subscription"
 	"github.com/onflow/flow-go/network/p2p/tracer"
@@ -63,7 +63,6 @@ import (
 	badgerState "github.com/onflow/flow-go/state/protocol/badger"
 	"github.com/onflow/flow-go/state/protocol/blocktimer"
 	"github.com/onflow/flow-go/state/protocol/events/gadgets"
-	"github.com/onflow/flow-go/utils/p2plogging"
 )
 
 // FlowBuilder extends cmd.NodeBuilder and declares additional functions needed to bootstrap an Access node
@@ -243,7 +242,7 @@ func (builder *FollowerServiceBuilder) buildFollowerEngine() *FollowerServiceBui
 	builder.Component("follower engine", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
 		var heroCacheCollector module.HeroCacheMetrics = metrics.NewNoopCollector()
 		if node.HeroCacheMetricsEnable {
-			heroCacheCollector = networkmetrics.FollowerCacheMetrics(node.MetricsRegisterer)
+			heroCacheCollector = metrics.FollowerCacheMetrics(node.MetricsRegisterer)
 		}
 
 		packer := hotsignature.NewConsensusSigDataPacker(builder.Committee)
@@ -598,7 +597,7 @@ func (builder *FollowerServiceBuilder) initPublicLibp2pNode(networkKey crypto.Pr
 		p2pconfig.PeerManagerDisableConfig(), // disable peer manager for follower
 		&p2p.DisallowListCacheConfig{
 			MaxSize: builder.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize,
-			Metrics: networkmetrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork),
+			Metrics: metrics.DisallowListCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork),
 		},
 		meshTracer).
 		SetSubscriptionFilter(
@@ -673,9 +672,9 @@ func (builder *FollowerServiceBuilder) enqueuePublicNetworkInit() {
 		Component("public network", func(node *cmd.NodeConfig) (module.ReadyDoneAware, error) {
 			receiveCache := netcache.NewHeroReceiveCache(builder.FlowConfig.NetworkConfig.NetworkReceivedMessageCacheSize,
 				builder.Logger,
-				networkmetrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork))
+				metrics.NetworkReceiveCacheMetricsFactory(builder.HeroCacheMetricsFactory(), network.PublicNetwork))
 
-			err := node.Metrics.Mempool.Register(networkmetrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size)
+			err := node.Metrics.Mempool.Register(metrics.PrependPublicPrefix(metrics.ResourceNetworkingReceiveCache), receiveCache.Size)
 			if err != nil {
 				return nil, fmt.Errorf("could not register networking receive cache metric: %w", err)
 			}
diff --git a/insecure/cmd/corrupted_builder.go b/insecure/cmd/corrupted_builder.go
index df4ec583b65..6ceaa1b1a2a 100644
--- a/insecure/cmd/corrupted_builder.go
+++ b/insecure/cmd/corrupted_builder.go
@@ -11,7 +11,7 @@ import (
 	"github.com/onflow/flow-go/insecure/corruptnet"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
-	"github.com/onflow/flow-go/module/metrics/network"
+	"github.com/onflow/flow-go/module/metrics"
 	"github.com/onflow/flow-go/network"
 	"github.com/onflow/flow-go/network/p2p"
 	"github.com/onflow/flow-go/network/p2p/connection"
@@ -107,7 +107,7 @@ func (cnb *CorruptedNodeBuilder) enqueueNetworkingLayer() {
 			cnb.FlowConfig.NetworkConfig,
 			&p2p.DisallowListCacheConfig{
 				MaxSize: cnb.FlowConfig.NetworkConfig.DisallowListNotificationCacheSize,
-				Metrics: networkmetrics.DisallowListCacheMetricsFactory(cnb.HeroCacheMetricsFactory(), network.PrivateNetwork),
+				Metrics: metrics.DisallowListCacheMetricsFactory(cnb.HeroCacheMetricsFactory(), network.PrivateNetwork),
 			},
 			cnb.TopicValidatorDisabled,
 			cnb.WithPubSubMessageSigning,
diff --git a/insecure/corruptlibp2p/pubsub_adapter.go b/insecure/corruptlibp2p/pubsub_adapter.go
index 54271264d7a..64975a18e3c 100644
--- a/insecure/corruptlibp2p/pubsub_adapter.go
+++ b/insecure/corruptlibp2p/pubsub_adapter.go
@@ -15,8 +15,8 @@ import (
 	"github.com/onflow/flow-go/module/component"
 	"github.com/onflow/flow-go/module/irrecoverable"
 	"github.com/onflow/flow-go/network/p2p"
+	"github.com/onflow/flow-go/network/p2p/p2plogging"
 	"github.com/onflow/flow-go/utils/logging"
-	"github.com/onflow/flow-go/utils/p2plogging"
 )
 
 // CorruptGossipSubAdapter is a wrapper around the forked pubsub topic from
diff --git a/module/chunks/chunkVerifier.go b/module/chunks/chunkVerifier.go
index 017e768ef7f..88453f594b4 100644
--- a/module/chunks/chunkVerifier.go
+++ b/module/chunks/chunkVerifier.go
@@ -20,7 +20,6 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/model/verification"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/module/executiondatasync/provider"
 )
 
@@ -345,7 +344,7 @@ func (fcv *ChunkVerifier) verifyTransactionsInContext(
 
 	// 2. build our chunk's chunk execution data using the locally calculated values, and calculate
 	// its CID
-	chunkExecutionData := model.ChunkExecutionData{
+	chunkExecutionData := execution_data.ChunkExecutionData{
 		Collection: cedCollection,
 		Events:     events,
 		TrieUpdate: trieUpdate,
diff --git a/module/chunks/chunkVerifier_test.go b/module/chunks/chunkVerifier_test.go
index fdcac7344d5..b10b4da33b8 100644
--- a/module/chunks/chunkVerifier_test.go
+++ b/module/chunks/chunkVerifier_test.go
@@ -28,7 +28,6 @@ import (
 	"github.com/onflow/flow-go/model/verification"
 	"github.com/onflow/flow-go/module/chunks"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/module/executiondatasync/provider"
 	"github.com/onflow/flow-go/module/metrics"
 	"github.com/onflow/flow-go/utils/unittest"
@@ -336,7 +335,7 @@ func updateExecutionData(t *testing.T, vch *verification.VerifiableChunkData, co
 		require.NoError(t, err)
 	}
 
-	ced := model.ChunkExecutionData{
+	ced := execution_data.ChunkExecutionData{
 		Collection: collection,
 		Events:     chunkEvents,
 		TrieUpdate: trieUpdate,
@@ -479,7 +478,7 @@ func GetBaselineVerifiableChunk(t *testing.T, script string, system bool) (*veri
 	trieUpdate, err := pathfinder.UpdateToTrieUpdate(update, partial.DefaultPathFinderVersion)
 	require.NoError(t, err)
 
-	chunkExecutionData := model.ChunkExecutionData{
+	chunkExecutionData := execution_data.ChunkExecutionData{
 		Collection: &coll,
 		Events:     chunkEvents,
 		TrieUpdate: trieUpdate,
diff --git a/module/executiondatasync/execution_data/cache/cache.go b/module/executiondatasync/execution_data/cache/cache.go
index afe7559c5f7..bfe497aac82 100644
--- a/module/executiondatasync/execution_data/cache/cache.go
+++ b/module/executiondatasync/execution_data/cache/cache.go
@@ -6,7 +6,6 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/module/mempool"
 	"github.com/onflow/flow-go/storage"
 )
@@ -45,13 +44,13 @@ func NewExecutionDataCache(
 // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore
 // - MalformedDataError if some level of the blob tree cannot be properly deserialized
 // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size
-func (c *ExecutionDataCache) ByID(ctx context.Context, executionDataID flow.Identifier) (*model.BlockExecutionDataEntity, error) {
+func (c *ExecutionDataCache) ByID(ctx context.Context, executionDataID flow.Identifier) (*execution_data.BlockExecutionDataEntity, error) {
 	execData, err := c.backend.Get(ctx, executionDataID)
 	if err != nil {
 		return nil, err
 	}
 
-	return model.NewBlockExecutionDataEntity(executionDataID, execData), nil
+	return execution_data.NewBlockExecutionDataEntity(executionDataID, execData), nil
 }
 
 // ByBlockID returns the execution data for the given block ID.
@@ -61,7 +60,7 @@ func (c *ExecutionDataCache) ByID(ctx context.Context, executionDataID flow.Iden
 // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore
 // - MalformedDataError if some level of the blob tree cannot be properly deserialized
 // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size
-func (c *ExecutionDataCache) ByBlockID(ctx context.Context, blockID flow.Identifier) (*model.BlockExecutionDataEntity, error) {
+func (c *ExecutionDataCache) ByBlockID(ctx context.Context, blockID flow.Identifier) (*execution_data.BlockExecutionDataEntity, error) {
 	if execData, ok := c.cache.ByID(blockID); ok {
 		return execData, nil
 	}
@@ -76,7 +75,7 @@ func (c *ExecutionDataCache) ByBlockID(ctx context.Context, blockID flow.Identif
 		return nil, err
 	}
 
-	execDataEntity := model.NewBlockExecutionDataEntity(executionDataID, execData)
+	execDataEntity := execution_data.NewBlockExecutionDataEntity(executionDataID, execData)
 
 	_ = c.cache.Add(execDataEntity)
 
@@ -90,7 +89,7 @@ func (c *ExecutionDataCache) ByBlockID(ctx context.Context, blockID flow.Identif
 // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore
 // - MalformedDataError if some level of the blob tree cannot be properly deserialized
 // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size
-func (c *ExecutionDataCache) ByHeight(ctx context.Context, height uint64) (*model.BlockExecutionDataEntity, error) {
+func (c *ExecutionDataCache) ByHeight(ctx context.Context, height uint64) (*execution_data.BlockExecutionDataEntity, error) {
 	blockID, err := c.headers.BlockIDByHeight(height)
 	if err != nil {
 		return nil, err
diff --git a/module/executiondatasync/execution_data/downloader.go b/module/executiondatasync/execution_data/downloader.go
index 2befa0a0745..71905342c33 100644
--- a/module/executiondatasync/execution_data/downloader.go
+++ b/module/executiondatasync/execution_data/downloader.go
@@ -12,7 +12,6 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/module/blobs"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/network"
 )
 
@@ -43,7 +42,7 @@ func WithSerializer(serializer Serializer) DownloaderOption {
 func NewDownloader(blobService network.BlobService, opts ...DownloaderOption) *downloader {
 	d := &downloader{
 		blobService,
-		model.DefaultMaxBlobSize,
+		DefaultMaxBlobSize,
 		DefaultSerializer,
 	}
 
@@ -70,7 +69,7 @@ func (d *downloader) Done() <-chan struct{} {
 // - BlobNotFoundError if some CID in the blob tree could not be found from the blob service
 // - MalformedDataError if some level of the blob tree cannot be properly deserialized
 // - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size
-func (d *downloader) Get(ctx context.Context, executionDataID flow.Identifier) (*model.BlockExecutionData, error) {
+func (d *downloader) Get(ctx context.Context, executionDataID flow.Identifier) (*BlockExecutionData, error) {
 	blobGetter := d.blobService.GetSession(ctx)
 
 	// First, download the root execution data record which contains a list of chunk execution data
@@ -83,7 +82,7 @@ func (d *downloader) Get(ctx context.Context, executionDataID flow.Identifier) (
 	g, gCtx := errgroup.WithContext(ctx)
 
 	// Next, download each of the chunk execution data blobs
-	chunkExecutionDatas := make([]*model.ChunkExecutionData, len(edRoot.ChunkExecutionDataIDs))
+	chunkExecutionDatas := make([]*ChunkExecutionData, len(edRoot.ChunkExecutionDataIDs))
 	for i, chunkDataID := range edRoot.ChunkExecutionDataIDs {
 		i := i
 		chunkDataID := chunkDataID
@@ -110,7 +109,7 @@ func (d *downloader) Get(ctx context.Context, executionDataID flow.Identifier) (
 	}
 
 	// Finally, recombine data into original record.
-	bed := &model.BlockExecutionData{
+	bed := &BlockExecutionData{
 		BlockID:             edRoot.BlockID,
 		ChunkExecutionDatas: chunkExecutionDatas,
 	}
@@ -172,7 +171,7 @@ func (d *downloader) getChunkExecutionData(
 	ctx context.Context,
 	chunkExecutionDataID cid.Cid,
 	blobGetter network.BlobGetter,
-) (*model.ChunkExecutionData, error) {
+) (*ChunkExecutionData, error) {
 	cids := []cid.Cid{chunkExecutionDataID}
 
 	// iteratively process each level of the blob tree until a ChunkExecutionData is returned or an
@@ -184,7 +183,7 @@ func (d *downloader) getChunkExecutionData(
 		}
 
 		switch v := v.(type) {
-		case *model.ChunkExecutionData:
+		case *ChunkExecutionData:
 			return v, nil
 		case *[]cid.Cid:
 			cids = *v
diff --git a/module/executiondatasync/execution_data/downloader_test.go b/module/executiondatasync/execution_data/downloader_test.go
index 503b665e356..775f4a68107 100644
--- a/module/executiondatasync/execution_data/downloader_test.go
+++ b/module/executiondatasync/execution_data/downloader_test.go
@@ -14,7 +14,6 @@ import (
 
 	"github.com/onflow/flow-go/module/blobs"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/network/mocknetwork"
 )
 
@@ -23,7 +22,7 @@ func TestCIDNotFound(t *testing.T) {
 	blobService := new(mocknetwork.BlobService)
 	downloader := execution_data.NewDownloader(blobService)
 	edStore := execution_data.NewExecutionDataStore(blobstore, execution_data.DefaultSerializer)
-	bed := generateBlockExecutionData(t, 10, 3*model.DefaultMaxBlobSize)
+	bed := generateBlockExecutionData(t, 10, 3*execution_data.DefaultMaxBlobSize)
 	edID, err := edStore.Add(context.Background(), bed)
 	require.NoError(t, err)
diff --git a/module/executiondatasync/execution_data/model/entity.go b/module/executiondatasync/execution_data/entity.go
similarity index 97%
rename from module/executiondatasync/execution_data/model/entity.go
rename to module/executiondatasync/execution_data/entity.go
index 2a79a59c11e..6facd5ad580 100644
--- a/module/executiondatasync/execution_data/model/entity.go
+++ b/module/executiondatasync/execution_data/entity.go
@@ -1,4 +1,4 @@
-package model
+package execution_data
 
 import (
 	"github.com/onflow/flow-go/model/flow"
diff --git a/module/executiondatasync/execution_data/model/execution_data.go b/module/executiondatasync/execution_data/execution_data.go
similarity index 97%
rename from module/executiondatasync/execution_data/model/execution_data.go
rename to module/executiondatasync/execution_data/execution_data.go
index 56ef01bc7ae..fb917c27ef2 100644
--- a/module/executiondatasync/execution_data/model/execution_data.go
+++ b/module/executiondatasync/execution_data/execution_data.go
@@ -1,4 +1,4 @@
-package model
+package execution_data
 
 import (
 	"github.com/onflow/flow-go/ledger"
diff --git a/module/executiondatasync/execution_data/mock/downloader.go b/module/executiondatasync/execution_data/mock/downloader.go
index b03181e8c06..dfeafeeffbe 100644
--- a/module/executiondatasync/execution_data/mock/downloader.go
+++ b/module/executiondatasync/execution_data/mock/downloader.go
@@ -6,7 +6,7 @@ import (
 	context "context"
 
 	flow "github.com/onflow/flow-go/model/flow"
-	execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data"
 
 	mock "github.com/stretchr/testify/mock"
 )
diff --git a/module/executiondatasync/execution_data/mock/execution_data_store.go b/module/executiondatasync/execution_data/mock/execution_data_store.go
index cbb9ea1d3ee..c11c0f1cbce 100644
--- a/module/executiondatasync/execution_data/mock/execution_data_store.go
+++ b/module/executiondatasync/execution_data/mock/execution_data_store.go
@@ -6,7 +6,7 @@ import (
 	context "context"
 
 	flow "github.com/onflow/flow-go/model/flow"
-	execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data"
 
 	mock "github.com/stretchr/testify/mock"
 )
diff --git a/module/executiondatasync/execution_data/serializer.go b/module/executiondatasync/execution_data/serializer.go
index 9bf21bd661e..a5dfa60252c 100644
--- a/module/executiondatasync/execution_data/serializer.go
+++ b/module/executiondatasync/execution_data/serializer.go
@@ -11,7 +11,6 @@ import (
 	"github.com/onflow/flow-go/model/encoding"
 	"github.com/onflow/flow-go/model/encoding/cbor"
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/network"
 	"github.com/onflow/flow-go/network/compressor"
 )
@@ -54,7 +53,7 @@ func getCode(v interface{}) (byte, error) {
 	switch v.(type) {
 	case *flow.BlockExecutionDataRoot:
 		return codeExecutionDataRoot, nil
-	case *model.ChunkExecutionData:
+	case *ChunkExecutionData:
 		return codeChunkExecutionData, nil
 	case []cid.Cid:
 		return codeRecursiveCIDs, nil
@@ -70,7 +69,7 @@ func getPrototype(code byte) (interface{}, error) {
 	case codeExecutionDataRoot:
 		return &flow.BlockExecutionDataRoot{}, nil
 	case codeChunkExecutionData:
-		return &model.ChunkExecutionData{}, nil
+		return &ChunkExecutionData{}, nil
 	case codeRecursiveCIDs:
 		return &[]cid.Cid{}, nil
 	default:
diff --git a/module/executiondatasync/execution_data/store.go b/module/executiondatasync/execution_data/store.go
index a72e9590306..8d31a8a0c4f 100644
--- a/module/executiondatasync/execution_data/store.go
+++ b/module/executiondatasync/execution_data/store.go
@@ -10,7 +10,6 @@ import (
 
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/blobs"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 )
 
 // ExecutionDataGetter handles getting execution data from a blobstore
@@ -20,7 +19,7 @@ type ExecutionDataGetter interface {
 	// - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore
 	// - MalformedDataError if some level of the blob tree cannot be properly deserialized
 	// - BlobSizeLimitExceededError if some blob in the blob tree exceeds the maximum allowed size
-	Get(ctx context.Context, rootID flow.Identifier) (*model.BlockExecutionData, error)
+	Get(ctx context.Context, rootID flow.Identifier) (*BlockExecutionData, error)
 }
 
 // ExecutionDataStore handles adding / getting execution data to / from a blobstore
@@ -30,7 +29,7 @@ type ExecutionDataStore interface {
 	// Add constructs a blob tree for the given BlockExecutionData, adds it to the blobstore,
 	// then returns the root CID.
 	// No errors are expected during normal operation.
-	Add(ctx context.Context, executionData *model.BlockExecutionData) (flow.Identifier, error)
+	Add(ctx context.Context, executionData *BlockExecutionData) (flow.Identifier, error)
 }
 
 type ExecutionDataStoreOption func(*store)
@@ -55,7 +54,7 @@ func NewExecutionDataStore(blobstore blobs.Blobstore, serializer Serializer, opt
 	s := &store{
 		blobstore:   blobstore,
 		serializer:  serializer,
-		maxBlobSize: model.DefaultMaxBlobSize,
+		maxBlobSize: DefaultMaxBlobSize,
 	}
 
 	for _, opt := range opts {
@@ -68,7 +67,7 @@ func NewExecutionDataStore(blobstore blobs.Blobstore, serializer Serializer, opt
 // Add constructs a blob tree for the given BlockExecutionData, adds it to the blobstore,
 // then returns the rootID.
 // No errors are expected during normal operation.
-func (s *store) Add(ctx context.Context, executionData *model.BlockExecutionData) (flow.Identifier, error) {
+func (s *store) Add(ctx context.Context, executionData *BlockExecutionData) (flow.Identifier, error) {
 	executionDataRoot := &flow.BlockExecutionDataRoot{
 		BlockID:               executionData.BlockID,
 		ChunkExecutionDataIDs: make([]cid.Cid, len(executionData.ChunkExecutionDatas)),
@@ -115,7 +114,7 @@ func (s *store) Add(ctx context.Context, executionData *model.BlockExecutionData
 // addChunkExecutionData constructs a blob tree for the given ChunkExecutionData, adds it to the
 // blobstore, and returns the root CID.
 // No errors are expected during normal operation.
-func (s *store) addChunkExecutionData(ctx context.Context, chunkExecutionData *model.ChunkExecutionData) (cid.Cid, error) {
+func (s *store) addChunkExecutionData(ctx context.Context, chunkExecutionData *ChunkExecutionData) (cid.Cid, error) {
 	var v interface{} = chunkExecutionData
 
 	// given an arbitrarily large v, split it into blobs of size up to maxBlobSize, adding them to
@@ -178,7 +177,7 @@ func (s *store) addBlobs(ctx context.Context, v interface{}) ([]cid.Cid, error)
 // Expected errors during normal operations:
 // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore
 // - MalformedDataError if some level of the blob tree cannot be properly deserialized
-func (s *store) Get(ctx context.Context, rootID flow.Identifier) (*model.BlockExecutionData, error) {
+func (s *store) Get(ctx context.Context, rootID flow.Identifier) (*BlockExecutionData, error) {
 	rootCid := flow.IdToCid(rootID)
 
 	// first, get the root blob. it will contain a list of blobs, one for each chunk
@@ -202,9 +201,9 @@ func (s *store) Get(ctx context.Context, rootID flow.Identifier) (*model.BlockEx
 	}
 
 	// next, get each chunk blob and deserialize it
-	blockExecutionData := &model.BlockExecutionData{
+	blockExecutionData := &BlockExecutionData{
 		BlockID:             executionDataRoot.BlockID,
-		ChunkExecutionDatas: make([]*model.ChunkExecutionData, len(executionDataRoot.ChunkExecutionDataIDs)),
+		ChunkExecutionDatas: make([]*ChunkExecutionData, len(executionDataRoot.ChunkExecutionDataIDs)),
 	}
 
 	for i, chunkExecutionDataID := range executionDataRoot.ChunkExecutionDataIDs {
@@ -223,7 +222,7 @@ func (s *store) Get(ctx context.Context, rootID flow.Identifier) (*model.BlockEx
 // Expected errors during normal operations:
 // - BlobNotFoundError if some CID in the blob tree could not be found from the blobstore
 // - MalformedDataError if some level of the blob tree cannot be properly deserialized
-func (s *store) getChunkExecutionData(ctx context.Context, chunkExecutionDataID cid.Cid) (*model.ChunkExecutionData, error) {
+func (s *store) getChunkExecutionData(ctx context.Context, chunkExecutionDataID cid.Cid) (*ChunkExecutionData, error) {
 	cids := []cid.Cid{chunkExecutionDataID}
 
 	// given a root CID, get the blob tree level by level, until we reach the full ChunkExecutionData
@@ -234,7 +233,7 @@ func (s *store) getChunkExecutionData(ctx context.Context, chunkExecutionDataID
 		}
 
 		switch v := v.(type) {
-		case *model.ChunkExecutionData:
+		case *ChunkExecutionData:
 			return v, nil
 		case *[]cid.Cid:
 			cids = *v
diff --git a/module/executiondatasync/execution_data/store_test.go b/module/executiondatasync/execution_data/store_test.go
index ab6fe3c0130..f1784201766 100644
--- a/module/executiondatasync/execution_data/store_test.go
+++ b/module/executiondatasync/execution_data/store_test.go
@@ -20,7 +20,6 @@ import (
 	"github.com/onflow/flow-go/ledger/common/testutils"
 	"github.com/onflow/flow-go/module/blobs"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
@@ -32,8 +31,8 @@ func getExecutionDataStore(blobstore blobs.Blobstore, serializer execution_data.
 	return execution_data.NewExecutionDataStore(blobstore, serializer)
 }
 
-func generateChunkExecutionData(t *testing.T, minSerializedSize uint64) *model.ChunkExecutionData {
-	ced := &model.ChunkExecutionData{
+func generateChunkExecutionData(t *testing.T, minSerializedSize uint64) *execution_data.ChunkExecutionData {
+	ced := &execution_data.ChunkExecutionData{
 		TrieUpdate: testutils.TrieUpdateFixture(1, 1, 8),
 	}
 
@@ -59,10 +58,10 @@ func generateChunkExecutionData(t *testing.T, minSerializedSize uint64) *model.C
 	}
 }
 
-func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *model.BlockExecutionData {
-	bed := &model.BlockExecutionData{
+func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *execution_data.BlockExecutionData {
+	bed := &execution_data.BlockExecutionData{
 		BlockID:             unittest.IdentifierFixture(),
-		ChunkExecutionDatas: make([]*model.ChunkExecutionData, numChunks),
+		ChunkExecutionDatas: make([]*execution_data.ChunkExecutionData, numChunks),
 	}
 
 	for i := 0; i < numChunks; i++ {
@@ -85,7 +84,7 @@ func getAllKeys(t *testing.T, bs blobs.Blobstore) []cid.Cid {
 	return cids
 }
 
-func deepEqual(t *testing.T, expected, actual *model.BlockExecutionData) {
+func deepEqual(t *testing.T, expected, actual *execution_data.BlockExecutionData) {
 	assert.Equal(t, expected.BlockID, actual.BlockID)
 	assert.Equal(t, len(expected.ChunkExecutionDatas), len(actual.ChunkExecutionDatas))
 
@@ -112,8 +111,8 @@ func TestHappyPath(t *testing.T) {
 		deepEqual(t, expected, actual)
 	}
 
-	test(1, 0)                          // small execution data (single level blob tree)
-	test(5, 5*model.DefaultMaxBlobSize) // large execution data (multi level blob tree)
+	test(1, 0)                                   // small execution data (single level blob tree)
+	test(5, 5*execution_data.DefaultMaxBlobSize) // large execution data (multi level blob tree)
 }
 
 type randomSerializer struct{}
@@ -141,7 +140,7 @@ func newCorruptedTailSerializer(numChunks int) *corruptedTailSerializer {
 	}
 }
 
 func (cts *corruptedTailSerializer) Serialize(w io.Writer, v interface{}) error {
-	if _, ok := v.(*model.ChunkExecutionData); ok {
+	if _, ok := v.(*execution_data.ChunkExecutionData); ok {
 		cts.i++
 		if cts.i == cts.corruptedChunk {
 			buf := &bytes.Buffer{}
@@ -169,7 +168,7 @@ func (cts *corruptedTailSerializer) Deserialize(r io.Reader) (interface{}, error
 func TestMalformedData(t *testing.T) {
 	t.Parallel()
 
-	test := func(bed *model.BlockExecutionData, serializer execution_data.Serializer) {
+	test := func(bed *execution_data.BlockExecutionData, serializer execution_data.Serializer) {
 		blobstore := getBlobstore()
 		defaultEds := getExecutionDataStore(blobstore, execution_data.DefaultSerializer)
 		malformedEds := getExecutionDataStore(blobstore, serializer)
@@ -180,7 +179,7 @@ func TestMalformedData(t *testing.T) {
 	}
 
 	numChunks := 5
-	bed := generateBlockExecutionData(t, numChunks, 10*model.DefaultMaxBlobSize)
+	bed := generateBlockExecutionData(t, numChunks, 10*execution_data.DefaultMaxBlobSize)
 
 	test(bed, &randomSerializer{})                   // random bytes
 	test(bed, newCorruptedTailSerializer(numChunks)) // serialized execution data with random bytes replaced at the end of a random chunk
@@ -192,7 +191,7 @@ func TestGetIncompleteData(t *testing.T) {
 	blobstore := getBlobstore()
 	eds := getExecutionDataStore(blobstore, execution_data.DefaultSerializer)
 
-	bed := generateBlockExecutionData(t, 5, 10*model.DefaultMaxBlobSize)
+	bed := generateBlockExecutionData(t, 5, 10*execution_data.DefaultMaxBlobSize)
 	rootID, err := eds.Add(context.Background(), bed)
 	require.NoError(t, err)
diff --git a/module/executiondatasync/execution_data/util.go b/module/executiondatasync/execution_data/util.go
index cf795d4a57b..50582d19840 100644
--- a/module/executiondatasync/execution_data/util.go
+++ b/module/executiondatasync/execution_data/util.go
@@ -5,12 +5,11 @@ import (
 
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/blobs"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 )
 
 // CalculateID calculates the root ID of the given execution data without storing any data.
 // No errors are expected during normal operation.
-func CalculateID(ctx context.Context, execData *model.BlockExecutionData, serializer Serializer) (flow.Identifier, error) {
+func CalculateID(ctx context.Context, execData *BlockExecutionData, serializer Serializer) (flow.Identifier, error) {
 	executionDatastore := NewExecutionDataStore(blobs.NewNoopBlobstore(), serializer)
 
 	id, err := executionDatastore.Add(ctx, execData)
diff --git a/module/executiondatasync/provider/provider.go b/module/executiondatasync/provider/provider.go
index 9d9f80c9329..ac5c3fe700d 100644
--- a/module/executiondatasync/provider/provider.go
+++ b/module/executiondatasync/provider/provider.go
@@ -15,7 +15,6 @@ import (
 	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/module/blobs"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/module/executiondatasync/tracker"
 	"github.com/onflow/flow-go/network"
 )
@@ -30,7 +29,7 @@ func WithBlobSizeLimit(size int) ProviderOption {
 
 // Provider is used to provide execution data blobs over the network via a blob service.
 type Provider interface {
-	Provide(ctx context.Context, blockHeight uint64, executionData *model.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, error)
+	Provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, error)
 }
 
 type ExecutionDataProvider struct {
@@ -55,7 +54,7 @@ func NewProvider(
 	p := &ExecutionDataProvider{
 		logger:       logger.With().Str("component", "execution_data_provider").Logger(),
 		metrics:      metrics,
-		maxBlobSize:  model.DefaultMaxBlobSize,
+		maxBlobSize:  execution_data.DefaultMaxBlobSize,
 		cidsProvider: NewExecutionDataCIDProvider(serializer),
 		blobService:  blobService,
 		storage:      storage,
@@ -124,7 +123,7 @@ func (p *ExecutionDataProvider) storeBlobs(parent context.Context, blockHeight u
 // It computes and returns the root CID of the execution data blob tree.
 // This function returns once the root CID has been computed, and all blobs are successfully stored
 // in the Bitswap Blobstore.
-func (p *ExecutionDataProvider) Provide(ctx context.Context, blockHeight uint64, executionData *model.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, error) {
+func (p *ExecutionDataProvider) Provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, error) {
 	rootID, rootData, errCh, err := p.provide(ctx, blockHeight, executionData)
 
 	storeErr, ok := <-errCh
@@ -143,7 +142,7 @@ func (p *ExecutionDataProvider) Provide(ctx context.Context, blockHeight uint64,
 	return rootID, rootData, nil
 }
 
-func (p *ExecutionDataProvider) provide(ctx context.Context, blockHeight uint64, executionData *model.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, <-chan error, error) {
+func (p *ExecutionDataProvider) provide(ctx context.Context, blockHeight uint64, executionData *execution_data.BlockExecutionData) (flow.Identifier, *flow.BlockExecutionDataRoot, <-chan error, error) {
 	logger := p.logger.With().Uint64("height", blockHeight).Str("block_id", executionData.BlockID.String()).Logger()
 	logger.Debug().Msg("providing execution data")
 
@@ -196,7 +195,7 @@ func (p *ExecutionDataProvider) provide(ctx context.Context, blockHeight uint64,
 func NewExecutionDataCIDProvider(serializer execution_data.Serializer) *ExecutionDataCIDProvider {
 	return &ExecutionDataCIDProvider{
 		serializer:  serializer,
-		maxBlobSize: model.DefaultMaxBlobSize,
+		maxBlobSize: execution_data.DefaultMaxBlobSize,
 	}
 }
 
@@ -212,7 +211,7 @@ func (p *ExecutionDataCIDProvider) CalculateExecutionDataRootID(
 }
 
 func (p *ExecutionDataCIDProvider) CalculateChunkExecutionDataID(
-	ced model.ChunkExecutionData,
+	ced execution_data.ChunkExecutionData,
 ) (cid.Cid, error) {
 	return p.addChunkExecutionData(&ced, nil)
 }
@@ -244,7 +243,7 @@ func (p *ExecutionDataCIDProvider) addExecutionDataRoot(
 }
 
 func (p *ExecutionDataCIDProvider) addChunkExecutionData(
-	ced *model.ChunkExecutionData,
+	ced *execution_data.ChunkExecutionData,
 	blobCh chan<- blobs.Blob,
 ) (cid.Cid, error) {
 	cids, err := p.addBlobs(ced, blobCh)
diff --git a/module/executiondatasync/provider/provider_test.go b/module/executiondatasync/provider/provider_test.go
index 1117bd1b8ac..b88033a7de1 100644
--- a/module/executiondatasync/provider/provider_test.go
+++ b/module/executiondatasync/provider/provider_test.go
@@ -17,7 +17,6 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module/blobs"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
 	"github.com/onflow/flow-go/module/executiondatasync/provider"
 	mocktracker "github.com/onflow/flow-go/module/executiondatasync/tracker/mock"
 	"github.com/onflow/flow-go/module/metrics"
@@ -53,8 +52,8 @@ func getProvider(blobService network.BlobService) provider.Provider {
 	)
 }
 
-func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *model.BlockExecutionData {
-	chunkData := make([]*model.ChunkExecutionData, 0, numChunks)
+func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *execution_data.BlockExecutionData {
+	chunkData := make([]*execution_data.ChunkExecutionData, 0, numChunks)
 	for i := 0; i < numChunks; i++ {
 		chunkData = append(chunkData, unittest.ChunkExecutionDataFixture(t, int(minSerializedSizePerChunk)))
 	}
 
 	return unittest.BlockExecutionDataFixture(unittest.WithChunkExecutionDatas(chunkData...))
 }
 
-func deepEqual(t *testing.T, expected, actual *model.BlockExecutionData) {
+func deepEqual(t *testing.T, expected, actual *execution_data.BlockExecutionData) {
 	assert.Equal(t, expected.BlockID, actual.BlockID)
 	assert.Equal(t, len(expected.ChunkExecutionDatas), len(actual.ChunkExecutionDatas))
 
@@ -95,14 +94,14 @@ func TestHappyPath(t *testing.T) {
 		assert.Len(t, executionDataRoot.ChunkExecutionDataIDs, numChunks)
 	}
 
-	test(1, 0)                          // small execution data (single level blob tree)
-	test(5, 5*model.DefaultMaxBlobSize) // large execution data (multi level blob tree)
+	test(1, 0)                                   // small execution data (single level blob tree)
+	test(5, 5*execution_data.DefaultMaxBlobSize) // large execution data (multi level blob tree)
 }
 
 func TestProvideContextCanceled(t *testing.T) {
 	t.Parallel()
 
-	bed := generateBlockExecutionData(t, 5, 5*model.DefaultMaxBlobSize)
+	bed := generateBlockExecutionData(t, 5, 5*execution_data.DefaultMaxBlobSize)
 
 	provider := getProvider(getBlobservice(getDatastore()))
 
 	_, _, err := provider.Provide(context.Background(), 0, bed)
@@ -152,7 +151,7 @@ func TestCalculateChunkExecutionDataID(t *testing.T) {
 	require.NoError(t, err)
 
 	expected := cid.MustParse("QmYSvEvCYCaMJXjCdWLzFYqMBzxgiE5GzEGQCKqHKM8KkP")
 
-	ced := model.ChunkExecutionData{
+	ced := execution_data.ChunkExecutionData{
 		Collection: &flow.Collection{
 			Transactions: []*flow.TransactionBody{
 				{Script: []byte("pub fun main() {}")},
diff --git a/module/mempool/execution_data.go b/module/mempool/execution_data.go
index fff3c03d238..88d466c146b 100644
--- a/module/mempool/execution_data.go
+++ b/module/mempool/execution_data.go
@@ -2,7 +2,7 @@ package mempool
 
 import (
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 )
 
 // ExecutionData represents a concurrency-safe memory pool for BlockExecutionData.
@@ -14,7 +14,7 @@ type ExecutionData interface {
 
 	// Add adds a block execution data to the mempool, keyed by block ID.
 	// It returns false if the execution data was already in the mempool.
-	Add(*model.BlockExecutionDataEntity) bool
+	Add(*execution_data.BlockExecutionDataEntity) bool
 
 	// Remove removes block execution data from mempool by block ID.
 	// It returns true if the execution data was known and removed.
@@ -22,14 +22,14 @@ type ExecutionData interface {
 
 	// ByID returns the block execution data for the given block ID from the mempool.
 	// It returns false if the execution data was not found in the mempool.
-	ByID(flow.Identifier) (*model.BlockExecutionDataEntity, bool)
+	ByID(flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool)
 
 	// Size return the current size of the memory pool.
 	Size() uint
 
 	// All retrieves all execution data that are currently in the memory pool
 	// as a slice.
-	All() []*model.BlockExecutionDataEntity
+	All() []*execution_data.BlockExecutionDataEntity
 
 	// Clear removes all execution data from the mempool.
 	Clear()
diff --git a/module/mempool/herocache/backdata/cache.go b/module/mempool/herocache/backdata/cache.go
index d353fda34a3..2ac93e38957 100644
--- a/module/mempool/herocache/backdata/cache.go
+++ b/module/mempool/herocache/backdata/cache.go
@@ -11,7 +11,6 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
-	"github.com/onflow/flow-go/module/metrics"
 	"github.com/onflow/flow-go/utils/logging"
 )
 
@@ -150,12 +149,6 @@ func NewCache(sizeLimit uint32,
 	return bd
 }
 
-// NewCacheWithNoopLoggerAndMetrics is a helper function to create a new Cache with a no-op logger and metrics collector.
-// This is useful for lower-level components that do not need to log or collect metrics, and want to avoid the overhead of import cycles.
-func NewCacheWithNoopLoggerAndMetrics(sizeLimit uint32, oversizeFactor uint32, ejectionMode heropool.EjectionMode, opts ...CacheOpt) *Cache {
-	return NewCache(sizeLimit, oversizeFactor, ejectionMode, zerolog.Nop(), metrics.NewNoopCollector(), opts...)
-}
-
 // Has checks if backdata already contains the entity with the given identifier.
 func (c *Cache) Has(entityID flow.Identifier) bool {
 	defer c.logTelemetry()
diff --git a/module/mempool/herocache/execution_data.go b/module/mempool/herocache/execution_data.go
index a36753a2c2d..9a075692578 100644
--- a/module/mempool/herocache/execution_data.go
+++ b/module/mempool/herocache/execution_data.go
@@ -7,7 +7,7 @@ import (
 
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata"
 	"github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool"
 	"github.com/onflow/flow-go/module/mempool/herocache/internal"
@@ -39,14 +39,14 @@ func (t *BlockExecutionData) Has(blockID flow.Identifier) bool {
 
 // Add adds a block execution data to the mempool, keyed by block ID.
 // It returns false if the execution data was already in the mempool.
-func (t *BlockExecutionData) Add(ed *model.BlockExecutionDataEntity) bool {
+func (t *BlockExecutionData) Add(ed *execution_data.BlockExecutionDataEntity) bool {
 	entity := internal.NewWrappedEntity(ed.BlockID, ed)
 	return t.c.Add(*entity)
 }
 
 // ByID returns the block execution data for the given block ID from the mempool.
 // It returns false if the execution data was not found in the mempool.
-func (t *BlockExecutionData) ByID(blockID flow.Identifier) (*model.BlockExecutionDataEntity, bool) {
+func (t *BlockExecutionData) ByID(blockID flow.Identifier) (*execution_data.BlockExecutionDataEntity, bool) {
 	entity, exists := t.c.ByID(blockID)
 	if !exists {
 		return nil, false
@@ -57,9 +57,9 @@ func (t *BlockExecutionData) ByID(blockID flow.Identifier) (*model.BlockExecutio
 
 // All returns all block execution data from the mempool. Since it is using the HeroCache, All guarantees returning
 // all block execution data in the same order as they are added.
-func (t *BlockExecutionData) All() []*model.BlockExecutionDataEntity {
+func (t *BlockExecutionData) All() []*execution_data.BlockExecutionDataEntity {
 	entities := t.c.All()
-	eds := make([]*model.BlockExecutionDataEntity, 0, len(entities))
+	eds := make([]*execution_data.BlockExecutionDataEntity, 0, len(entities))
 	for _, entity := range entities {
 		eds = append(eds, unwrap(entity))
 	}
@@ -83,13 +83,13 @@ func (t *BlockExecutionData) Remove(blockID flow.Identifier) bool {
 }
 
 // unwrap converts an internal.WrappedEntity to a BlockExecutionDataEntity.
-func unwrap(entity flow.Entity) *model.BlockExecutionDataEntity {
+func unwrap(entity flow.Entity) *execution_data.BlockExecutionDataEntity {
 	wrappedEntity, ok := entity.(internal.WrappedEntity)
 	if !ok {
 		panic(fmt.Sprintf("invalid wrapped entity in block execution data pool (%T)", entity))
 	}
 
-	ed, ok := wrappedEntity.Entity.(*model.BlockExecutionDataEntity)
+	ed, ok := wrappedEntity.Entity.(*execution_data.BlockExecutionDataEntity)
 	if !ok {
 		panic(fmt.Sprintf("invalid entity in block execution data pool (%T)", wrappedEntity.Entity))
 	}
diff --git a/module/mempool/herocache/execution_data_test.go b/module/mempool/herocache/execution_data_test.go
index 8d32de2e081..46c0d302956 100644
--- a/module/mempool/herocache/execution_data_test.go
+++ b/module/mempool/herocache/execution_data_test.go
@@ -8,7 +8,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	"github.com/onflow/flow-go/module/mempool/herocache"
 	"github.com/onflow/flow-go/module/metrics"
 	"github.com/onflow/flow-go/utils/unittest"
@@ -70,7 +70,7 @@ func TestBlockExecutionDataConcurrentWriteAndRead(t *testing.T) {
 
 	// storing all cache
 	for i := 0; i < total; i++ {
-		go func(ed *model.BlockExecutionDataEntity) {
+		go func(ed *execution_data.BlockExecutionDataEntity) {
 			require.True(t, cache.Add(ed))
 
 			wg.Done()
@@ -83,7 +83,7 @@ func TestBlockExecutionDataConcurrentWriteAndRead(t *testing.T) {
 	wg.Add(total)
 	// reading all cache
 	for i := 0; i < total; i++ {
-		go func(ed *model.BlockExecutionDataEntity) {
+		go func(ed *execution_data.BlockExecutionDataEntity) {
 			actual, ok := cache.ByID(ed.BlockID)
 			require.True(t, ok)
 			require.Equal(t, ed, actual)
diff --git a/module/mempool/mock/execution_data.go b/module/mempool/mock/execution_data.go
index d25f1c24c53..9a9b1669daf 100644
--- a/module/mempool/mock/execution_data.go
+++ b/module/mempool/mock/execution_data.go
@@ -4,7 +4,7 @@ package mempool
 
 import (
 	flow "github.com/onflow/flow-go/model/flow"
-	execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	execution_data "github.com/onflow/flow-go/module/executiondatasync/execution_data"
 
 	mock "github.com/stretchr/testify/mock"
 )
diff --git a/module/metrics/access.go b/module/metrics/access.go
index 64673a22bd7..1116f87f433 100644
--- a/module/metrics/access.go
+++ b/module/metrics/access.go
@@ -6,7 +6,6 @@ import (
 
 	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/module/counters"
-	"github.com/onflow/flow-go/module/metrics/internal"
 )
 
 type AccessCollectorOpts func(*AccessCollector)
@@ -54,56 +53,56 @@ func NewAccessCollector(opts ...AccessCollectorOpts) *AccessCollector {
 	ac := &AccessCollector{
 		connectionReused: promauto.NewCounter(prometheus.CounterOpts{
 			Name:      "connection_reused",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemConnectionPool,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemConnectionPool,
 			Help:      "counter for the number of times connections get reused",
 		}),
 		connectionsInPool: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "connections_in_pool",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemConnectionPool,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemConnectionPool,
 			Help:      "counter for the number of connections in the pool against max number tne pool can hold",
 		}, []string{"result"}),
 		connectionAdded: promauto.NewCounter(prometheus.CounterOpts{
 			Name:      "connection_added",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemConnectionPool,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemConnectionPool,
 			Help:      "counter for the number of times connections are added to the pool",
 		}),
 		connectionEstablished: promauto.NewCounter(prometheus.CounterOpts{
 			Name:      "connection_established",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemConnectionPool,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemConnectionPool,
 			Help:      "counter for the number of times connections are established",
 		}),
 		connectionInvalidated: promauto.NewCounter(prometheus.CounterOpts{
 			Name:      "connection_invalidated",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemConnectionPool,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemConnectionPool,
 			Help:      "counter for the number of times connections are invalidated",
 		}),
 		connectionUpdated: promauto.NewCounter(prometheus.CounterOpts{
 			Name:      "connection_updated",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemConnectionPool,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemConnectionPool,
 			Help:      "counter for the number of times existing connections from the pool are updated",
 		}),
 		connectionEvicted: promauto.NewCounter(prometheus.CounterOpts{
 			Name:      "connection_evicted",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemConnectionPool,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemConnectionPool,
 			Help:      "counter for the number of times a cached connection is evicted from the connection pool",
 		}),
 		lastFullBlockHeight: promauto.NewGauge(prometheus.GaugeOpts{
 			Name:      "last_full_finalized_block_height",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemIngestion,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemIngestion,
 			Help:      "gauge to track the highest consecutive finalized block height with all collections indexed",
 		}),
 		maxReceiptHeight: promauto.NewGauge(prometheus.GaugeOpts{
 			Name:      "max_receipt_height",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemIngestion,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemIngestion,
 			Help:      "gauge to track the maximum block height of execution receipts received",
 		}),
 		maxReceiptHeightValue: counters.NewMonotonousCounter(0),
diff --git a/module/metrics/alsp.go b/module/metrics/alsp.go
index 2c459231004..3d5dc2bc510 100644
--- a/module/metrics/alsp.go
+++ b/module/metrics/alsp.go
@@ -4,7 +4,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 
 	"github.com/onflow/flow-go/module"
-	"github.com/onflow/flow-go/module/metrics/internal"
 )
 
 // AlspMetrics is a struct that contains all the metrics related to the ALSP module.
@@ -26,8 +25,8 @@ func NewAlspMetrics() *AlspMetrics {
 
 	alsp.reportedMisbehaviorCount = prometheus.NewCounterVec(
 		prometheus.CounterOpts{
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemAlsp,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemAlsp,
 			Name:      "reported_misbehavior_total",
 			Help:      "number of reported spamming misbehavior received by alsp",
 		}, []string{LabelChannel, LabelMisbehavior},
diff --git a/module/metrics/badger.go b/module/metrics/badger.go
index be91f1afed4..4b643336170 100644
--- a/module/metrics/badger.go
+++ b/module/metrics/badger.go
@@ -6,39 +6,37 @@ import (
 
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/collectors"
-
-	"github.com/onflow/flow-go/module/metrics/internal"
 )
 
 func RegisterBadgerMetrics() error {
 	expvarCol := collectors.NewExpvarCollector(map[string]*prometheus.Desc{
 		"badger_disk_reads_total": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_disk_reads_total", internal.NamespaceStorage, internal.SubsystemBadger), "cumulative number of reads", nil, nil),
+			fmt.Sprintf("%s_%s_disk_reads_total", namespaceStorage, subsystemBadger), "cumulative number of reads", nil, nil),
 		"badger_disk_writes_total": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_disk_writes_total", internal.NamespaceStorage, internal.SubsystemBadger), "cumulative number of writes", nil, nil),
+			fmt.Sprintf("%s_%s_disk_writes_total", namespaceStorage, subsystemBadger), "cumulative number of writes", nil, nil),
 		"badger_read_bytes": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_read_bytes", internal.NamespaceStorage, internal.SubsystemBadger), "cumulative number of bytes read", nil, nil),
+			fmt.Sprintf("%s_%s_read_bytes", namespaceStorage, subsystemBadger), "cumulative number of bytes read", nil, nil),
 		"badger_written_bytes": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_written_bytes", internal.NamespaceStorage, internal.SubsystemBadger), "cumulative number of bytes written", nil, nil),
+			fmt.Sprintf("%s_%s_written_bytes", namespaceStorage, subsystemBadger), "cumulative number of bytes written", nil, nil),
 		"badger_gets_total": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_gets_total", internal.NamespaceStorage, internal.SubsystemBadger), "number of gets", nil, nil),
+			fmt.Sprintf("%s_%s_gets_total", namespaceStorage, subsystemBadger), "number of gets", nil, nil),
 		"badger_memtable_gets_total": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_memtable_gets_total", internal.NamespaceStorage, internal.SubsystemBadger), "number of memtable gets", nil, nil),
+			fmt.Sprintf("%s_%s_memtable_gets_total", namespaceStorage, subsystemBadger), "number of memtable gets", nil, nil),
 		"badger_puts_total": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_puts_total", internal.NamespaceStorage, internal.SubsystemBadger), "number of puts", nil, nil),
+			fmt.Sprintf("%s_%s_puts_total", namespaceStorage, subsystemBadger), "number of puts", nil, nil),
 		// NOTE: variable exists, but not used in badger yet
 		//"badger_blocked_puts_total": prometheus.NewDesc(
 		//	fmt.Sprintf("%s_%s_blocked_puts_total", namespaceStorage, subsystemBadger), "number of blocked puts", nil, nil),
 		"badger_pending_writes_total": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_badger_pending_writes_total", internal.NamespaceStorage, internal.SubsystemBadger), "tracks the number of pending writes", []string{"path"}, nil),
+			fmt.Sprintf("%s_%s_badger_pending_writes_total", namespaceStorage, subsystemBadger), "tracks the number of pending writes", []string{"path"}, nil),
 		"badger_lsm_bloom_hits_total": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_lsm_bloom_hits_total", internal.NamespaceStorage, internal.SubsystemBadger), "number of LSM bloom hits", []string{"level"}, nil),
+			fmt.Sprintf("%s_%s_lsm_bloom_hits_total", namespaceStorage, subsystemBadger), "number of LSM bloom hits", []string{"level"}, nil),
 		"badger_lsm_level_gets_total": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_lsm_level_gets_total", internal.NamespaceStorage, internal.SubsystemBadger), "number of LSM gets", []string{"level"}, nil),
+			fmt.Sprintf("%s_%s_lsm_level_gets_total", namespaceStorage, subsystemBadger), "number of LSM gets", []string{"level"}, nil),
 		"badger_lsm_size_bytes": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_lsm_size_bytes", internal.NamespaceStorage, internal.SubsystemBadger), "size of the LSM in bytes", []string{"path"}, nil),
+			fmt.Sprintf("%s_%s_lsm_size_bytes", namespaceStorage, subsystemBadger), "size of the LSM in bytes", []string{"path"}, nil),
 		"badger_vlog_size_bytes": prometheus.NewDesc(
-			fmt.Sprintf("%s_%s_vlog_size_bytes", internal.NamespaceStorage, internal.SubsystemBadger), "size of the value log in bytes", []string{"path"}, nil),
+			fmt.Sprintf("%s_%s_vlog_size_bytes", namespaceStorage, subsystemBadger), "size of the value log in bytes", []string{"path"}, nil),
 	})
 
 	err := prometheus.Register(expvarCol)
diff --git a/module/metrics/bitswap.go b/module/metrics/bitswap.go
index 4b73442416c..d279e9f7292 100644
--- a/module/metrics/bitswap.go
+++ b/module/metrics/bitswap.go
@@ -3,8 +3,6 @@ package metrics
 import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
-
-	"github.com/onflow/flow-go/module/metrics/internal"
 )
 
 type BitswapCollector struct {
@@ -23,56 +21,56 @@ func NewBitswapCollector() *BitswapCollector {
 	bc := &BitswapCollector{
 		peers: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "num_peers",
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemBitswap,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemBitswap,
 			Help:      "the number of connected peers",
 		}, []string{"prefix"}),
 		wantlist: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "wantlist_size",
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemBitswap,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemBitswap,
 			Help:      "the wantlist size",
 		}, []string{"prefix"}),
 		blobsReceived: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "blobs_received",
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemBitswap,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemBitswap,
 			Help:      "the number of received blobs",
 		}, []string{"prefix"}),
 		dataReceived: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "data_received",
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemBitswap,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemBitswap,
 			Help:      "the amount of data received",
 		}, []string{"prefix"}),
 		blobsSent: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "blobs_sent",
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemBitswap,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemBitswap,
 			Help:      "the number of sent blobs",
 		}, []string{"prefix"}),
 		dataSent: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "data_sent",
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemBitswap,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemBitswap,
 			Help:      "the amount of data sent",
 		}, []string{"prefix"}),
 		dupBlobsReceived: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "dup_blobs_received",
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemBitswap,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemBitswap,
 			Help:      "the number of duplicate blobs received",
 		}, []string{"prefix"}),
 		dupDataReceived: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "dup_data_received",
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemBitswap,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemBitswap,
 			Help:      "the amount of duplicate data received",
 		}, []string{"prefix"}),
 		messagesReceived: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "messages_received",
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemBitswap,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemBitswap,
 			Help:      "the number of messages received",
 		}, []string{"prefix"}),
 	}
diff --git a/module/metrics/cache.go b/module/metrics/cache.go
index b432e5d7bff..3337d683a24 100644
--- a/module/metrics/cache.go
+++ b/module/metrics/cache.go
@@ -5,7 +5,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promauto"
 
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/module/metrics/internal"
 )
 
 type CacheCollector struct {
@@ -21,32 +20,32 @@ func NewCacheCollector(chain flow.ChainID) *CacheCollector {
 
 		entries: promauto.NewGaugeVec(prometheus.GaugeOpts{
 			Name:        "entries_total",
-			Namespace:   internal.NamespaceStorage,
-			Subsystem:   internal.SubsystemCache,
+			Namespace:   namespaceStorage,
+			Subsystem:   subsystemCache,
 			Help:        "the number of entries in the cache",
 			ConstLabels: prometheus.Labels{LabelChain: chain.String()},
 		}, []string{LabelResource}),
 		hits: promauto.NewCounterVec(prometheus.CounterOpts{
 			Name:        "hits_total",
-			Namespace:   internal.NamespaceStorage,
-			Subsystem:   internal.SubsystemCache,
+			Namespace:   namespaceStorage,
+			Subsystem:   subsystemCache,
 			Help:        "the number of hits for the cache",
 			ConstLabels: prometheus.Labels{LabelChain: chain.String()},
 		}, []string{LabelResource}),
 		notfounds: promauto.NewCounterVec(prometheus.CounterOpts{
 			Name:        "notfounds_total",
-			Namespace:   internal.NamespaceStorage,
-			Subsystem:   internal.SubsystemCache,
+			Namespace:   namespaceStorage,
+			Subsystem:   subsystemCache,
 			Help:        "the number of times the queried item was not found in either cache or database",
 			ConstLabels: prometheus.Labels{LabelChain: chain.String()},
 		}, []string{LabelResource}),
 		misses: promauto.NewCounterVec(prometheus.CounterOpts{
 			Name:        "misses_total",
-			Namespace:   internal.NamespaceStorage,
-			Subsystem:   internal.SubsystemCache,
+			Namespace:   namespaceStorage,
+			Subsystem:   subsystemCache,
 			Help:        "the number of times the queried item was not found in cache, but found in database",
 			ConstLabels: prometheus.Labels{LabelChain: chain.String()},
 		}, []string{LabelResource}),
diff --git a/module/metrics/chainsync.go b/module/metrics/chainsync.go
index c785bc37806..b58718ce81d 100644
--- a/module/metrics/chainsync.go
+++ b/module/metrics/chainsync.go
@@ -8,7 +8,6 @@ import (
 
 	"github.com/onflow/flow-go/model/chainsync"
 	"github.com/onflow/flow-go/model/flow"
-	"github.com/onflow/flow-go/module/metrics/internal"
 )
 
 type ChainSyncCollector struct {
@@ -26,40 +25,40 @@ func NewChainSyncCollector(chainID flow.ChainID) *ChainSyncCollector {
 		chainID: chainID,
 		timeToPruned: prometheus.NewHistogramVec(prometheus.HistogramOpts{
 			Name:      "time_to_pruned_seconds",
-			Namespace: internal.NamespaceChainsync,
-			Subsystem: internal.SubsystemSyncCore,
+			Namespace: namespaceChainsync,
+			Subsystem: subsystemSyncCore,
 			Help:      "the time between queueing and pruning a block in seconds",
 			Buckets:   []float64{.1, .25, .5, 1, 2.5, 5, 7.5, 10, 20},
 		}, []string{"status", "requested_by"}),
 		timeToReceived: prometheus.NewHistogramVec(prometheus.HistogramOpts{
 			Name:      "time_to_received_seconds",
-			Namespace: internal.NamespaceChainsync,
-			Subsystem: internal.SubsystemSyncCore,
+			Namespace: namespaceChainsync,
+			Subsystem: subsystemSyncCore,
 			Help:      "the time between queueing and receiving a block in seconds",
 			Buckets:   []float64{.1, .25, .5, 1, 2.5, 5, 7.5, 10, 20},
 		}, []string{"requested_by"}),
 		totalPruned: prometheus.NewCounterVec(prometheus.CounterOpts{
 			Name:      "blocks_pruned_total",
-			Namespace: internal.NamespaceChainsync,
-			Subsystem: internal.SubsystemSyncCore,
+			Namespace: namespaceChainsync,
+			Subsystem: subsystemSyncCore,
 			Help:      "the total number of blocks pruned by 'id' or 'height'",
 		}, []string{"requested_by"}),
 		storedBlocks: prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Name:      "blocks_stored_total",
-			Namespace: internal.NamespaceChainsync,
-			Subsystem: internal.SubsystemSyncCore,
+			Namespace: namespaceChainsync,
+			Subsystem: subsystemSyncCore,
 			Help:      "the total number of blocks currently stored by 'id' or 'height'",
 		}, []string{"requested_by"}),
 		totalHeightsRequested: prometheus.NewCounter(prometheus.CounterOpts{
 			Name:      "block_heights_requested_total",
-			Namespace: internal.NamespaceChainsync,
-			Subsystem: internal.SubsystemSyncCore,
+			Namespace: namespaceChainsync,
+			Subsystem: subsystemSyncCore,
 			Help:      "the total number of blocks requested by height, including retried requests for the same heights. Eg: a range of 1-10 would increase the counter by 10",
 		}),
 		totalIdsRequested: prometheus.NewCounter(prometheus.CounterOpts{
 			Name:      "block_ids_requested_total",
-			Namespace: internal.NamespaceChainsync,
-			Subsystem: internal.SubsystemSyncCore,
+			Namespace: namespaceChainsync,
+			Subsystem: subsystemSyncCore,
 			Help:      "the total number of blocks requested by id",
 		}),
 	}
diff --git a/module/metrics/cleaner.go b/module/metrics/cleaner.go
index ed8fc946c8c..57bddb9de71 100644
--- a/module/metrics/cleaner.go
+++ b/module/metrics/cleaner.go
@@ -5,8 +5,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
-
-	"github.com/onflow/flow-go/module/metrics/internal"
 )
 
 type CleanerCollector struct {
@@ -16,8 +14,8 @@ type CleanerCollector struct {
 func NewCleanerCollector() *CleanerCollector {
 	cc := &CleanerCollector{
 		gcDuration: promauto.NewHistogram(prometheus.HistogramOpts{
-			Namespace: internal.NamespaceStorage,
-			Subsystem: internal.SubsystemBadger,
+			Namespace: namespaceStorage,
+			Subsystem: subsystemBadger,
 			Name:      "garbage_collection_runtime_s",
 			Buckets:   []float64{1, 10, 60, 60 * 5, 60 * 15},
 			Help:      "the time spent on badger garbage collection",
diff --git a/module/metrics/collection.go b/module/metrics/collection.go
index dafe665e174..19be622f0ab 100644
--- a/module/metrics/collection.go
+++ b/module/metrics/collection.go
@@ -7,7 +7,6 @@ import (
 	"github.com/onflow/flow-go/model/cluster"
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
-	"github.com/onflow/flow-go/module/metrics/internal"
 )
 
 type CollectionCollector struct {
@@ -24,29 +23,29 @@ func NewCollectionCollector(tracer module.Tracer) *CollectionCollector {
 		tracer: tracer,
 
 		transactionsIngested: promauto.NewCounter(prometheus.CounterOpts{
-			Namespace: internal.NamespaceCollection,
+			Namespace: namespaceCollection,
 			Name:      "ingested_transactions_total",
 			Help:      "count of transactions ingested by this node",
 		}),
 		finalizedHeight: promauto.NewGaugeVec(prometheus.GaugeOpts{
-			Namespace:
internal.NamespaceCollection, - Subsystem: internal.SubsystemProposal, + Namespace: namespaceCollection, + Subsystem: subsystemProposal, Name: "finalized_height", Help: "tracks the latest finalized height", }, []string{LabelChain}), proposals: promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: internal.NamespaceCollection, - Subsystem: internal.SubsystemProposal, + Namespace: namespaceCollection, + Subsystem: subsystemProposal, Buckets: []float64{1, 2, 5, 10, 20}, Name: "proposals_size_transactions", Help: "size/number of proposed collections", }, []string{LabelChain}), guarantees: promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: internal.NamespaceCollection, - Subsystem: internal.SubsystemProposal, + Namespace: namespaceCollection, + Subsystem: subsystemProposal, Buckets: []float64{1, 2, 5, 10, 20}, Name: "guarantees_size_transactions", Help: "size/number of guaranteed/finalized collections", diff --git a/module/metrics/compliance.go b/module/metrics/compliance.go index e3027ee7543..de74b79cfcf 100644 --- a/module/metrics/compliance.go +++ b/module/metrics/compliance.go @@ -8,7 +8,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) type ComplianceCollector struct { @@ -39,105 +38,105 @@ func NewComplianceCollector() *ComplianceCollector { currentEpochCounter: promauto.NewGauge(prometheus.GaugeOpts{ Name: "current_epoch_counter", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the current epoch's counter", }), currentEpochPhase: promauto.NewGauge(prometheus.GaugeOpts{ Name: "current_epoch_phase", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the current epoch's phase", }), committedEpochFinalView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "committed_epoch_final_view", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the final view of the committed epoch with the greatest counter", }), lastEpochTransitionHeight: promauto.NewGauge(prometheus.GaugeOpts{ Name: "last_epoch_transition_height", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the height of the most recent finalized epoch transition; in other words the height of the first block of the current epoch", }), currentEpochFinalView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "current_epoch_final_view", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the final view of the current epoch", }), currentDKGPhase1FinalView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "current_dkg_phase1_final_view", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the final view of phase 1 of the current epoch's DKG", }), currentDKGPhase2FinalView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "current_dkg_phase2_final_view", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help:
"the final view of phase 2 of current epochs DKG", }), currentDKGPhase3FinalView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "current_dkg_phase3_final_view", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the final view of phase 3 of the current epochs DKG (a successful DKG will end shortly after this view)", }), finalizedHeight: promauto.NewGauge(prometheus.GaugeOpts{ Name: "finalized_height", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the last finalized height", }), sealedHeight: promauto.NewGauge(prometheus.GaugeOpts{ Name: "sealed_height", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the last sealed height", }), finalizedBlocks: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "finalized_blocks_total", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the number of finalized blocks", }, []string{LabelProposer}), sealedBlocks: promauto.NewCounter(prometheus.CounterOpts{ Name: "sealed_blocks_total", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the number of sealed blocks", }), finalizedPayload: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "finalized_payload_total", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the number of resources in finalized blocks", }, []string{LabelResource}), sealedPayload: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "sealed_payload_total", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the number of resources in sealed blocks", }, []string{LabelResource}), finalizedBlocksPerSecond: promauto.NewSummary(prometheus.SummaryOpts{ Name: "finalized_blocks_per_second", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the number of finalized blocks per second/the finalized block rate", Objectives: map[float64]float64{ 0.01: 0.001, @@ -153,8 +152,8 @@ func NewComplianceCollector() *ComplianceCollector { epochEmergencyFallbackTriggered: promauto.NewGauge(prometheus.GaugeOpts{ Name: "epoch_fallback_triggered", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "indicates whether epoch emergency fallback is triggered; if >0, the fallback is triggered", }), } diff --git a/module/metrics/consensus.go b/module/metrics/consensus.go index de3538c4941..0c5229639fd 100644 --- a/module/metrics/consensus.go +++ b/module/metrics/consensus.go @@ -7,7 +7,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) // ConsensusCollector ... 
@@ -31,26 +30,26 @@ type ConsensusCollector struct { func NewConsensusCollector(tracer module.Tracer, registerer prometheus.Registerer) *ConsensusCollector { onReceiptDuration := prometheus.NewCounter(prometheus.CounterOpts{ Name: "push_receipts_on_receipt_duration_seconds_total", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemMatchEngine, + Namespace: namespaceConsensus, + Subsystem: subsystemMatchEngine, Help: "time spent in consensus matching engine's onReceipt method in seconds", }) onApprovalDuration := prometheus.NewCounter(prometheus.CounterOpts{ Name: "on_approval_duration_seconds_total", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemMatchEngine, + Namespace: namespaceConsensus, + Subsystem: subsystemMatchEngine, Help: "time spent in consensus matching engine's onApproval method in seconds", }) checkSealingDuration := prometheus.NewCounter(prometheus.CounterOpts{ Name: "check_sealing_duration_seconds_total", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemMatchEngine, + Namespace: namespaceConsensus, + Subsystem: subsystemMatchEngine, Help: "time spent in consensus matching engine's checkSealing method in seconds", }) emergencySealedBlocks := prometheus.NewCounter(prometheus.CounterOpts{ Name: "emergency_sealed_blocks_total", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCompliance, + Namespace: namespaceConsensus, + Subsystem: subsystemCompliance, Help: "the number of blocks sealed in emergency mode", }) registerer.MustRegister( diff --git a/module/metrics/cruisectl.go b/module/metrics/cruisectl.go index 6a459ebc810..7d56e762d50 100644 --- a/module/metrics/cruisectl.go +++ b/module/metrics/cruisectl.go @@ -5,8 +5,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/onflow/flow-go/module/metrics/internal" ) // CruiseCtlMetrics captures metrics about the Block Rate Controller, which adjusts @@ -23,32 +21,32 @@ func NewCruiseCtlMetrics() *CruiseCtlMetrics { return &CruiseCtlMetrics{ proportionalErr: promauto.NewGauge(prometheus.GaugeOpts{ Name: "proportional_err_s", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCruiseCtl, + Namespace: namespaceConsensus, + Subsystem: subsystemCruiseCtl, Help: "The current proportional error measured by the controller", }), integralErr: promauto.NewGauge(prometheus.GaugeOpts{ Name: "integral_err_s", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCruiseCtl, + Namespace: namespaceConsensus, + Subsystem: subsystemCruiseCtl, Help: "The current integral error measured by the controller", }), derivativeErr: promauto.NewGauge(prometheus.GaugeOpts{ Name: "derivative_err_per_s", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCruiseCtl, + Namespace: namespaceConsensus, + Subsystem: subsystemCruiseCtl, Help: "The current derivative error measured by the controller", }), targetProposalDur: promauto.NewGauge(prometheus.GaugeOpts{ Name: "target_proposal_dur_s", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCruiseCtl, + Namespace: namespaceConsensus, + Subsystem: subsystemCruiseCtl, Help: "The current target duration from parent to child proposal", }), controllerOutput: promauto.NewGauge(prometheus.GaugeOpts{ Name: "controller_output_s", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemCruiseCtl, + Namespace: namespaceConsensus, + Subsystem: 
subsystemCruiseCtl, Help: "The most recent output of the controller; the adjustment to subtract from the baseline proposal duration", }), } diff --git a/module/metrics/engine.go b/module/metrics/engine.go index 45f7e07d0ed..b846e75dcdf 100644 --- a/module/metrics/engine.go +++ b/module/metrics/engine.go @@ -2,7 +2,6 @@ package metrics import ( "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -24,36 +23,36 @@ func NewEngineCollector() *EngineCollector { sent: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "messages_sent_total", - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemEngine, + Namespace: namespaceNetwork, + Subsystem: subsystemEngine, Help: "the number of messages sent by engines", }, []string{EngineLabel, LabelMessage}), received: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "messages_received_total", - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemEngine, + Namespace: namespaceNetwork, + Subsystem: subsystemEngine, Help: "the number of messages received by engines", }, []string{EngineLabel, LabelMessage}), handled: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "messages_handled_total", - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemEngine, + Namespace: namespaceNetwork, + Subsystem: subsystemEngine, Help: "the number of messages handled by engines", }, []string{EngineLabel, LabelMessage}), inboundDropped: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "inbound_messages_dropped_total", - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemEngine, + Namespace: namespaceNetwork, + Subsystem: subsystemEngine, Help: "the number of inbound messages dropped by engines", }, []string{EngineLabel, LabelMessage}), outboundDropped: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "outbound_messages_dropped_total", - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemEngine, + Namespace: namespaceNetwork, + Subsystem: subsystemEngine, Help: "the number of outbound messages dropped by engines", }, []string{EngineLabel, LabelMessage}), } diff --git a/module/metrics/execution.go b/module/metrics/execution.go index 969e12e1d06..8d7b155791e 100644 --- a/module/metrics/execution.go +++ b/module/metrics/execution.go @@ -8,7 +8,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) type ExecutionCollector struct { @@ -88,436 +87,436 @@ type ExecutionCollector struct { func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { forestApproxMemorySize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "forest_approx_memory_size", Help: "an approximate size of in-memory forest in bytes", }) forestNumberOfTrees := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "forest_number_of_trees", Help: "the number of trees in memory", }) latestTrieRegCount := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: 
"latest_trie_reg_count", Help: "the number of allocated registers (latest created trie)", }) latestTrieRegCountDiff := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "latest_trie_reg_count_diff", Help: "the difference between number of unique register allocated of the latest created trie and parent trie", }) latestTrieRegSize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "latest_trie_reg_size", Help: "the size of allocated registers (latest created trie)", }) latestTrieRegSizeDiff := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "latest_trie_reg_size_diff", Help: "the difference between size of unique register allocated of the latest created trie and parent trie", }) latestTrieMaxDepthTouched := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "latest_trie_max_depth_touched", Help: "the maximum depth touched of the latest created trie", }) updatedCount := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "updates_counted", Help: "the number of updates", }) proofSize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "average_proof_size", Help: "the average size of a single generated proof in bytes", }) updatedValuesNumber := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "update_values_number", Help: "the total number of values updated", }) updatedValuesSize := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "update_values_size", Help: "the total size of values for single update in bytes", }) updatedDuration := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "update_duration", Help: "the duration of update operation", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, }) updatedDurationPerValue := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "update_duration_per_value", Help: "the duration of update operation per value", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, }) readValuesNumber := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "read_values_number", Help: "the total number of values read", }) readValuesSize := 
promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "read_values_size", Help: "the total size of values for single read in bytes", }) readDuration := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "read_duration", Help: "the duration of read operation", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, }) readDurationPerValue := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemMTrie, + Namespace: namespaceExecution, + Subsystem: subsystemMTrie, Name: "read_duration_per_value", Help: "the duration of read operation per value", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, }) blockExecutionTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "block_execution_time_milliseconds", Help: "the total time spent on block execution in milliseconds", Buckets: []float64{100, 500, 1000, 1500, 2000, 2500, 3000, 6000}, }) blockComputationUsed := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "block_computation_used", Help: "the total amount of computation used by a block", Buckets: []float64{1000, 10000, 100000, 500000, 1000000, 5000000, 10000000}, }) blockMemoryUsed := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "block_memory_used", Help: "the total amount of memory (cadence estimate) used by a block", Buckets: []float64{100_000_000, 1_000_000_000, 5_000_000_000, 10_000_000_000, 50_000_000_000, 100_000_000_000, 500_000_000_000, 1_000_000_000_000, 5_000_000_000_000, 10_000_000_000_000}, }) blockEventCounts := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "block_event_counts", Help: "the total number of events emitted during a block execution", Buckets: []float64{10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000}, }) blockEventSize := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "block_event_size", Help: "the total number of bytes used by events emitted during a block execution", Buckets: []float64{1_000, 10_000, 100_000, 500_000, 1_000_000, 5_000_000, 10_000_000, 50_000_000, 100_000_000, 500_000_000}, }) blockComputationVector := promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "block_execution_effort_vector", Help: "execution effort vector of the last executed block by computation kind", }, []string{LabelComputationKind}) blockCachedPrograms := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: 
internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "block_execution_cached_programs", Help: "Number of cached programs at the end of block execution", }) blockTransactionCounts := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "block_transaction_counts", Help: "the total number of transactions per block", Buckets: prometheus.ExponentialBuckets(4, 2, 10), }) blockCollectionCounts := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "block_collection_counts", Help: "the total number of collections per block", Buckets: prometheus.ExponentialBuckets(1, 2, 8), }) collectionExecutionTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "collection_execution_time_milliseconds", Help: "the total time spent on collection execution in milliseconds", Buckets: []float64{100, 200, 500, 1000, 1500, 2000}, }) collectionComputationUsed := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "collection_computation_used", Help: "the total amount of computation used by a collection", Buckets: []float64{1000, 10000, 50000, 100000, 500000, 1000000}, }) collectionMemoryUsed := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "collection_memory_used", Help: "the total amount of memory used (cadence estimate) by a collection", Buckets: []float64{10_000_000, 100_000_000, 1_000_000_000, 5_000_000_000, 10_000_000_000, 50_000_000_000, 100_000_000_000, 500_000_000_000, 1_000_000_000_000, 5_000_000_000_000}, }) collectionEventSize := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "collection_event_size", Help: "the total byte size used by all events generated during a collection execution", Buckets: []float64{100, 1000, 10000, 100000, 10000000, 100000000, 1000000000}, }) collectionEventCounts := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "collection_event_counts", Help: "the total number of events emitted per collection", Buckets: prometheus.ExponentialBuckets(4, 2, 8), }) collectionNumberOfRegistersTouched := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "collection_number_of_registers_touched", Help: "the total number of registers touched during collection execution", Buckets: prometheus.ExponentialBuckets(10, 2, 12), }) collectionTotalBytesWrittenToRegisters := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: 
internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "collection_total_number_of_bytes_written_to_registers", Help: "the total number of bytes written to registers during collection execution", Buckets: prometheus.ExponentialBuckets(1000, 2, 16), }) collectionTransactionCounts := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "collection_transaction_counts", Help: "the total number of transactions per collection", Buckets: prometheus.ExponentialBuckets(4, 2, 8), }) collectionRequestsSent := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemIngestion, + Namespace: namespaceExecution, + Subsystem: subsystemIngestion, Name: "collection_requests_sent", Help: "the number of collection requests sent", }) collectionRequestsRetries := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemIngestion, + Namespace: namespaceExecution, + Subsystem: subsystemIngestion, Name: "collection_requests_retries", Help: "the number of collection requests retried", }) transactionParseTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "transaction_parse_time_nanoseconds", Help: "the parse time for a transaction in nanoseconds", Buckets: prometheus.ExponentialBuckets(10, 10, 8), }) transactionCheckTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "transaction_check_time_nanoseconds", Help: "the checking time for a transaction in nanoseconds", Buckets: prometheus.ExponentialBuckets(10, 10, 8), }) transactionInterpretTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "transaction_interpret_time_nanoseconds", Help: "the interpretation time for a transaction in nanoseconds", Buckets: prometheus.ExponentialBuckets(10, 10, 8), }) transactionExecutionTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "transaction_execution_time_milliseconds", Help: "the total time spent on transaction execution in milliseconds", Buckets: prometheus.ExponentialBuckets(2, 2, 10), }) transactionConflictRetries := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "transaction_conflict_retries", Help: "the number of conflict retries needed to successfully commit a transaction. 
If retry count is high, consider reducing concurrency", Buckets: []float64{0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 100}, }) transactionComputationUsed := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "transaction_computation_used", Help: "the total amount of computation used by a transaction", Buckets: []float64{50, 100, 500, 1000, 5000, 10000}, }) transactionMemoryEstimate := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "transaction_memory_estimate", Help: "the estimated memory used by a transaction", Buckets: []float64{1_000_000, 10_000_000, 100_000_000, 1_000_000_000, 5_000_000_000, 10_000_000_000, 50_000_000_000, 100_000_000_000}, }) transactionEmittedEvents := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "transaction_emitted_events", Help: "the total number of events emitted by a transaction", Buckets: prometheus.ExponentialBuckets(2, 2, 10), }) transactionEventSize := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "transaction_event_size", Help: "the total number of bytes used by events emitted during a transaction execution", Buckets: prometheus.ExponentialBuckets(100, 2, 12), }) scriptExecutionTime := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "script_execution_time_milliseconds", Help: "the total time spent on script execution in milliseconds", Buckets: []float64{2, 4, 8, 16, 32, 64, 100, 250, 500}, }) scriptComputationUsed := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "script_computation_used", Help: "the total amount of computation used by a script", Buckets: []float64{50, 100, 500, 1000, 5000, 10000}, }) scriptMemoryUsage := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "script_memory_usage", Help: "the total amount of memory allocated by a script", Buckets: []float64{100_000, 1_000_000, 10_000_000, 50_000_000, 100_000_000, 500_000_000, 1_000_000_000}, }) scriptMemoryEstimate := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "script_memory_estimate", Help: "the estimated memory used by a script", Buckets: []float64{1_000_000, 10_000_000, 100_000_000, 1_000_000_000, 5_000_000_000, 10_000_000_000, 50_000_000_000, 100_000_000_000}, }) scriptMemoryDifference := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name:
"script_memory_difference", Help: "the difference in actual memory usage and estimate for a script", Buckets: []float64{-1, 0, 10_000_000, 100_000_000, 1_000_000_000}, }) chunkDataPackRequestProcessedTotal := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemProvider, + Namespace: namespaceExecution, + Subsystem: subsystemProvider, Name: "chunk_data_packs_requested_total", Help: "the total number of chunk data pack requests processed by provider engine", }) chunkDataPackProofSize := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemIngestion, + Namespace: namespaceExecution, + Subsystem: subsystemIngestion, Name: "chunk_data_pack_proof_size", Help: "the total number bytes used for storing proof part of chunk data pack", Buckets: prometheus.ExponentialBuckets(1000, 2, 16), }) chunkDataPackCollectionSize := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemIngestion, + Namespace: namespaceExecution, + Subsystem: subsystemIngestion, Name: "chunk_data_pack_collection_size", Help: "the total number transactions in the collection", Buckets: prometheus.ExponentialBuckets(1, 2, 10), }) blockDataUploadsInProgress := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemBlockDataUploader, + Namespace: namespaceExecution, + Subsystem: subsystemBlockDataUploader, Name: "block_data_upload_in_progress", Help: "number of concurrently running Block Data upload operations", }) blockDataUploadsDuration := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemBlockDataUploader, + Namespace: namespaceExecution, + Subsystem: subsystemBlockDataUploader, Name: "block_data_upload_duration_ms", Help: "the duration of update upload operation", Buckets: []float64{1, 100, 500, 1000, 2000}, }) computationResultUploadedCount := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemProvider, + Namespace: namespaceExecution, + Subsystem: subsystemProvider, Name: "computation_result_uploaded_count", Help: "the total count of computation result uploaded", }) computationResultUploadRetriedCount := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemProvider, + Namespace: namespaceExecution, + Subsystem: subsystemProvider, Name: "computation_result_upload_retried_count", Help: "the total count of computation result upload retried", }) @@ -583,93 +582,93 @@ func NewExecutionCollector(tracer module.Tracer) *ExecutionCollector { computationResultUploadedCount: computationResultUploadedCount, computationResultUploadRetriedCount: computationResultUploadRetriedCount, totalExecutedBlocksCounter: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "total_executed_blocks", Help: "the total number of blocks that have been executed", }), totalExecutedCollectionsCounter: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "total_executed_collections", Help: "the total number of collections that have 
been executed", }), totalExecutedTransactionsCounter: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "total_executed_transactions", Help: "the total number of transactions that have been executed", }), totalFailedTransactionsCounter: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "total_failed_transactions", Help: "the total number of transactions that has failed when executed", }), totalExecutedScriptsCounter: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "total_executed_scripts", Help: "the total number of scripts that have been executed", }), lastExecutedBlockHeightGauge: promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "last_executed_block_height", Help: "the last height that was executed", }), stateStorageDiskTotal: promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemStateStorage, + Namespace: namespaceExecution, + Subsystem: subsystemStateStorage, Name: "data_size_bytes", Help: "the execution state size on disk in bytes", }), storageStateCommitment: promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemStateStorage, + Namespace: namespaceExecution, + Subsystem: subsystemStateStorage, Name: "commitment_size_bytes", Help: "the storage size of a state commitment in bytes", }), stateSyncActive: promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemIngestion, + Namespace: namespaceExecution, + Subsystem: subsystemIngestion, Name: "state_sync_active", Help: "indicates if the state sync is active", }), numberOfAccounts: promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "number_of_accounts", Help: "the number of existing accounts on the network", }), programsCacheMiss: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "programs_cache_miss", Help: "the number of times a program was not found in the cache and had to be loaded", }), programsCacheHit: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemRuntime, + Namespace: namespaceExecution, + Subsystem: subsystemRuntime, Name: "programs_cache_hit", Help: "the number of times a program was found in the cache", }), maxCollectionHeight: prometheus.NewGauge(prometheus.GaugeOpts{ Name: "max_collection_height", - Namespace: internal.NamespaceExecution, - Subsystem: internal.SubsystemIngestion, + Namespace: namespaceExecution, + Subsystem: subsystemIngestion, Help: "gauge to track the maximum block height of collections received", }), } diff --git a/module/metrics/execution_data_requester.go b/module/metrics/execution_data_requester.go index 
1d159e9a31a..e8ccc5e3266 100644 --- a/module/metrics/execution_data_requester.go +++ b/module/metrics/execution_data_requester.go @@ -7,7 +7,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) type ExecutionDataRequesterCollector struct { @@ -26,51 +25,51 @@ type ExecutionDataRequesterCollector struct { func NewExecutionDataRequesterCollector() module.ExecutionDataRequesterMetrics { fetchDuration := promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceStateSync, - Subsystem: internal.SubsystemExecutionDataRequester, + Namespace: namespaceStateSync, + Subsystem: subsystemExecutionDataRequester, Name: "execution_requester_download_duration_ms", Help: "the duration of execution data download operation", Buckets: []float64{1, 100, 500, 1000, 2000, 5000}, }) downloadsInProgress := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceStateSync, - Subsystem: internal.SubsystemExecutionDataRequester, + Namespace: namespaceStateSync, + Subsystem: subsystemExecutionDataRequester, Name: "execution_requester_in_progress_downloads", Help: "number of concurrently running execution data download operations", }) outstandingNotifications := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceStateSync, - Subsystem: internal.SubsystemExecutionDataRequester, + Namespace: namespaceStateSync, + Subsystem: subsystemExecutionDataRequester, Name: "execution_requester_outstanding_notifications", Help: "number of execution data received notifications waiting to be processed", }) highestDownloadHeight := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceStateSync, - Subsystem: internal.SubsystemExecutionDataRequester, + Namespace: namespaceStateSync, + Subsystem: subsystemExecutionDataRequester, Name: "execution_requester_highest_download_height", Help: "highest block height for which execution data has been received", }) highestNotificationHeight := promauto.NewGauge(prometheus.GaugeOpts{ - Namespace: internal.NamespaceStateSync, - Subsystem: internal.SubsystemExecutionDataRequester, + Namespace: namespaceStateSync, + Subsystem: subsystemExecutionDataRequester, Name: "execution_requester_highest_notification_height", Help: "highest block height for which execution data notifications have been sent", }) downloadRetries := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceStateSync, - Subsystem: internal.SubsystemExecutionDataRequester, + Namespace: namespaceStateSync, + Subsystem: subsystemExecutionDataRequester, Name: "execution_requester_download_retries_total", Help: "number of execution data download retries", }) failedDownloads := promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceStateSync, - Subsystem: internal.SubsystemExecutionDataRequester, + Namespace: namespaceStateSync, + Subsystem: subsystemExecutionDataRequester, Name: "execution_data_failed_downloads_total", Help: "number of failed execution data downloads", }) diff --git a/module/metrics/execution_data_sync.go b/module/metrics/execution_data_sync.go index 0ce08dd6e19..44a49f357fb 100644 --- a/module/metrics/execution_data_sync.go +++ b/module/metrics/execution_data_sync.go @@ -6,8 +6,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/onflow/flow-go/module/metrics/internal" ) type ExecutionDataRequesterV2Collector struct { @@ 
-26,20 +24,20 @@ func NewExecutionDataRequesterV2Collector() *ExecutionDataRequesterV2Collector { return &ExecutionDataRequesterV2Collector{ fulfilledHeight: promauto.NewGauge(prometheus.GaugeOpts{ Name: "fulfilled_height", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataRequester, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataRequester, Help: "the latest sealed height for which all execution data has been retrieved and stored locally", }), receiptsSkipped: promauto.NewCounter(prometheus.CounterOpts{ Name: "receipts_skipped", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataRequester, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataRequester, Help: "the number of skipped receipts", }), requestDurations: promauto.NewSummary(prometheus.SummaryOpts{ Name: "request_durations_ms", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataRequester, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataRequester, Help: "the durations of requests in milliseconds", Objectives: map[float64]float64{ 0.01: 0.001, @@ -53,14 +51,14 @@ func NewExecutionDataRequesterV2Collector() *ExecutionDataRequesterV2Collector { }), latestSuccessfulRequestHeight: promauto.NewGauge(prometheus.GaugeOpts{ Name: "latest_successful_request_height", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataRequester, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataRequester, Help: "the block height of the latest successful request", }), executionDataSizes: promauto.NewSummary(prometheus.SummaryOpts{ Name: "execution_data_sizes_bytes", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataRequester, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataRequester, Help: "the sizes of Block Execution Data in bytes", Objectives: map[float64]float64{ 0.01: 0.001, @@ -74,27 +72,27 @@ func NewExecutionDataRequesterV2Collector() *ExecutionDataRequesterV2Collector { }), requestAttempts: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "request_attempts", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataRequester, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataRequester, Buckets: []float64{1, 2, 3, 4, 5}, Help: "the number of attempts before a request succeeded", }), requestsFailed: promauto.NewCounterVec(prometheus.CounterOpts{ Name: "requests_failed", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataRequester, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataRequester, Help: "the number of failed requests", }, []string{ExecutionDataRequestRetryable}), requestsCancelled: promauto.NewCounter(prometheus.CounterOpts{ Name: "requests_cancelled", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataRequester, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataRequester, Help: "the number of cancelled requests", }), responsesDropped: promauto.NewCounter(prometheus.CounterOpts{ Name: "responses_dropped", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataRequester, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataRequester, Help: "the number of dropped responses", }), } @@ -140,8 +138,8 @@ func NewExecutionDataProviderCollector() 
*ExecutionDataProviderCollector { return &ExecutionDataProviderCollector{ computeRootIDDurations: promauto.NewSummary(prometheus.SummaryOpts{ Name: "compute_root_id_durations_ms", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataProvider, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataProvider, Help: "the durations of computing root IDs in milliseconds", Objectives: map[float64]float64{ 0.01: 0.001, @@ -155,15 +153,15 @@ func NewExecutionDataProviderCollector() *ExecutionDataProviderCollector { }), numberOfChunks: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "number_of_chunks", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataProvider, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataProvider, Buckets: []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}, Help: "the number of chunks in a Block Execution Data", }), addBlobsDurations: promauto.NewSummary(prometheus.SummaryOpts{ Name: "add_blobs_durations_ms", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataProvider, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataProvider, Help: "the durations of adding blobs in milliseconds", Objectives: map[float64]float64{ 0.01: 0.001, @@ -177,8 +175,8 @@ func NewExecutionDataProviderCollector() *ExecutionDataProviderCollector { }), executionDataSizes: promauto.NewSummary(prometheus.SummaryOpts{ Name: "execution_data_sizes_bytes", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataProvider, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataProvider, Help: "the sizes of Block Execution Data in bytes", Objectives: map[float64]float64{ 0.01: 0.001, @@ -192,8 +190,8 @@ func NewExecutionDataProviderCollector() *ExecutionDataProviderCollector { }), addBlobsFailed: promauto.NewCounter(prometheus.CounterOpts{ Name: "add_blobs_failed", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataProvider, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataProvider, Help: "the number of failed attempts to add blobs", }), } @@ -222,8 +220,8 @@ func NewExecutionDataPrunerCollector() *ExecutionDataPrunerCollector { return &ExecutionDataPrunerCollector{ pruneDurations: promauto.NewSummary(prometheus.SummaryOpts{ Name: "prune_durations_ms", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataPruner, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataPruner, Help: "the durations of pruning in milliseconds", Objectives: map[float64]float64{ 0.01: 0.001, @@ -237,8 +235,8 @@ func NewExecutionDataPrunerCollector() *ExecutionDataPrunerCollector { }), latestHeightPruned: promauto.NewGauge(prometheus.GaugeOpts{ Name: "latest_height_pruned", - Namespace: internal.NamespaceExecutionDataSync, - Subsystem: internal.SubsystemExeDataPruner, + Namespace: namespaceExecutionDataSync, + Subsystem: subsystemExeDataPruner, Help: "the latest height pruned", }), } diff --git a/module/metrics/gossipsub.go b/module/metrics/gossipsub.go index 26e8ee9936e..5ba5369fa0d 100644 --- a/module/metrics/gossipsub.go +++ b/module/metrics/gossipsub.go @@ -5,7 +5,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) type GossipSubMetrics struct { @@ -28,8 
+27,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.receivedIHaveCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gs.prefix + "gossipsub_received_ihave_total", Help: "number of received ihave messages from gossipsub protocol", }, @@ -37,8 +36,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.receivedIWantCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gs.prefix + "gossipsub_received_iwant_total", Help: "number of received iwant messages from gossipsub protocol", }, @@ -46,8 +45,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.receivedGraftCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gs.prefix + "gossipsub_received_graft_total", Help: "number of received graft messages from gossipsub protocol", }, @@ -55,8 +54,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.receivedPruneCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gs.prefix + "gossipsub_received_prune_total", Help: "number of received prune messages from gossipsub protocol", }, @@ -64,8 +63,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.incomingRpcAcceptedFullyCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gs.prefix + "gossipsub_incoming_rpc_accepted_fully_total", Help: "number of incoming rpc messages accepted fully by gossipsub protocol", }, @@ -73,8 +72,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.incomingRpcAcceptedOnlyControlCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gs.prefix + "gossipsub_incoming_rpc_accepted_only_control_total", Help: "number of incoming rpc messages accepted only control messages by gossipsub protocol", }, @@ -82,8 +81,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.incomingRpcRejectedCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gs.prefix + "gossipsub_incoming_rpc_rejected_total", Help: "number of incoming rpc messages rejected by gossipsub protocol", }, @@ -91,8 +90,8 @@ func NewGossipSubMetrics(prefix string) *GossipSubMetrics { gs.receivedPublishMessageCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gs.prefix + "gossipsub_received_publish_message_total", Help: "number of received publish messages from gossipsub protocol", }, @@ -162,8 +161,8 @@ func NewGossipSubLocalMeshMetrics(prefix string) *GossipSubLocalMeshMetrics { return &GossipSubLocalMeshMetrics{ localMeshSize: 
*promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: prefix + "gossipsub_local_mesh_size", Help: "number of peers in the local mesh of the node", }, diff --git a/module/metrics/gossipsub_rpc_validation_inspector.go b/module/metrics/gossipsub_rpc_validation_inspector.go index 6823848bdca..f4d79d4121d 100644 --- a/module/metrics/gossipsub_rpc_validation_inspector.go +++ b/module/metrics/gossipsub_rpc_validation_inspector.go @@ -7,7 +7,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) // GossipSubRpcValidationInspectorMetrics metrics collector for the gossipsub RPC validation inspector. @@ -26,16 +25,16 @@ func NewGossipSubRPCValidationInspectorMetrics(prefix string) *GossipSubRpcValid gc := &GossipSubRpcValidationInspectorMetrics{prefix: prefix} gc.rpcCtrlMsgInBlockingPreProcessingGauge = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gc.prefix + "control_message_in_blocking_preprocess_total", Help: "the number of rpc control messages currently being pre-processed", }, []string{LabelCtrlMsgType}, ) gc.rpcCtrlMsgBlockingProcessingTimeHistogram = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gc.prefix + "rpc_control_message_validator_blocking_preprocessing_time_seconds", Help: "duration [seconds; measured with float64 precision] of how long the rpc control message validator blocked pre-processing an rpc control message", Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 7.5, 10, 20}, @@ -43,16 +42,16 @@ func NewGossipSubRPCValidationInspectorMetrics(prefix string) *GossipSubRpcValid ) gc.rpcCtrlMsgInAsyncPreProcessingGauge = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gc.prefix + "control_messages_in_async_processing_total", Help: "the number of rpc control messages currently being processed asynchronously by workers from the rpc validator worker pool", }, []string{LabelCtrlMsgType}, ) gc.rpcCtrlMsgAsyncProcessingTimeHistogram = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: gc.prefix + "rpc_control_message_validator_async_processing_time_seconds", Help: "duration [seconds; measured with float64 precision] of how long it takes rpc control message validator to asynchronously process a rpc message", Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 7.5, 10, 20}, diff --git a/module/metrics/gossipsub_score.go b/module/metrics/gossipsub_score.go index f7e1d275907..2f574cf332b 100644 --- a/module/metrics/gossipsub_score.go +++ b/module/metrics/gossipsub_score.go @@ -7,7 +7,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" "github.com/onflow/flow-go/network/channels" ) @@ -39,8 +38,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.peerScore = 
promauto.NewHistogram( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: prefix + "gossipsub_overall_peer_score", Help: "overall peer score from gossipsub peer scoring", Buckets: gossipSubScoreBuckets, @@ -49,8 +48,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.appSpecificScore = promauto.NewHistogram( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: prefix + "gossipsub_app_specific_score", Help: "app specific score from gossipsub peer scoring", Buckets: gossipSubScoreBuckets, @@ -59,8 +58,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.behaviourPenalty = promauto.NewHistogram( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: prefix + "gossipsub_behaviour_penalty_score", Help: "behaviour penalty from gossipsub peer scoring", Buckets: gossipSubScoreBuckets, @@ -69,8 +68,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.ipCollocationFactor = promauto.NewHistogram( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: prefix + "gossipsub_ip_collocation_factor_score", Help: "ip collocation factor from gossipsub peer scoring", Buckets: gossipSubScoreBuckets, @@ -79,8 +78,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.timeInMesh = *promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: prefix + "gossipsub_time_in_mesh_score", Help: "time in mesh from gossipsub scoring", Buckets: gossipSubScoreBuckets, @@ -90,8 +89,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.meshMessageDelivery = *promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: prefix + "gossipsub_mesh_message_delivery_score", Help: "mesh message delivery from gossipsub peer scoring", }, @@ -100,8 +99,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.invalidMessageDelivery = *promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: prefix + "gossipsub_invalid_message_delivery_score", Help: "invalid message delivery from gossipsub peer scoring", }, @@ -110,8 +109,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.firstMessageDelivery = *promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: prefix + "gossipsub_first_message_delivery_score", Help: "first message delivery from gossipsub peer scoring", }, @@ -120,8 +119,8 @@ func NewGossipSubScoreMetrics(prefix string) *GossipSubScoreMetrics { gs.warningStateGauge = promauto.NewGauge( prometheus.GaugeOpts{ - Namespace: internal.NamespaceNetwork, - 
Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: prefix + "gossipsub_warning_state_peers_total", Help: "number of peers in the warning state", }, diff --git a/module/metrics/herocache.go b/module/metrics/herocache.go index 7396a775bf8..f82cd84bb57 100644 --- a/module/metrics/herocache.go +++ b/module/metrics/herocache.go @@ -6,7 +6,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" + "github.com/onflow/flow-go/network" ) const subsystemHeroCache = "hero_cache" @@ -64,24 +64,143 @@ func NewNoopHeroCacheMetricsFactory() HeroCacheMetricsFactory { } } +func NetworkReceiveCacheMetricsFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingReceiveCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +// DisallowListCacheMetricsFactory is the factory method for creating a new HeroCacheCollector for the disallow list cache. +// The disallow-list cache is used to keep track of peers that are disallow-listed and the reasons for it. +// Args: +// - f: the HeroCacheMetricsFactory to create the collector +// - networkingType: the networking type of the cache, i.e., whether it is used for the public or the private network +// Returns: +// - a HeroCacheMetrics for the disallow list cache +func DisallowListCacheMetricsFactory(f HeroCacheMetricsFactory, networkingType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingDisallowListCache + if networkingType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func NetworkDnsTxtCacheMetricsFactory(registrar prometheus.Registerer) *HeroCacheCollector { + return NewHeroCacheCollector(namespaceNetwork, ResourceNetworkingDnsTxtCache, registrar) +} + +func NetworkDnsIpCacheMetricsFactory(registrar prometheus.Registerer) *HeroCacheCollector { + return NewHeroCacheCollector(namespaceNetwork, ResourceNetworkingDnsIpCache, registrar) +} + func ChunkDataPackRequestQueueMetricsFactory(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(internal.NamespaceExecution, ResourceChunkDataPackRequests, registrar) + return NewHeroCacheCollector(namespaceExecution, ResourceChunkDataPackRequests, registrar) } func ReceiptRequestsQueueMetricFactory(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(internal.NamespaceExecution, ResourceReceipt, registrar) + return NewHeroCacheCollector(namespaceExecution, ResourceReceipt, registrar) } func CollectionRequestsQueueMetricFactory(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(internal.NamespaceCollection, ResourceCollection, registrar) + return NewHeroCacheCollector(namespaceCollection, ResourceCollection, registrar) +} + +func DisallowListNotificationQueueMetricFactory(registrar prometheus.Registerer) *HeroCacheCollector { + return NewHeroCacheCollector(namespaceNetwork, ResourceNetworkingDisallowListNotificationQueue, registrar) +} + +func ApplicationLayerSpamRecordCacheMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingApplicationLayerSpamRecordCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + + return f(namespaceNetwork, r) +} + +func 
ApplicationLayerSpamRecordQueueMetricsFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingApplicationLayerSpamReportQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func GossipSubRPCMetricsObserverInspectorQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for the sake of backward compatibility of metric names. + r := ResourceNetworkingRpcMetricsObserverInspectorQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func GossipSubRPCInspectorQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for the sake of backward compatibility of metric names. + r := ResourceNetworkingRpcValidationInspectorQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func GossipSubRPCSentTrackerMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for the sake of backward compatibility of metric names. + r := ResourceNetworkingRPCSentTrackerCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func GossipSubRPCSentTrackerQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for the sake of backward compatibility of metric names. + r := ResourceNetworkingRPCSentTrackerQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func RpcInspectorNotificationQueueMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + r := ResourceNetworkingRpcInspectorNotificationQueue + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) +} + +func GossipSubRPCInspectorClusterPrefixedCacheMetricFactory(f HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { + // we don't use the public prefix for the metrics here for the sake of backward compatibility of metric names. 
+ r := ResourceNetworkingRpcClusterPrefixReceivedCache + if networkType == network.PublicNetwork { + r = PrependPublicPrefix(r) + } + return f(namespaceNetwork, r) } func CollectionNodeTransactionsCacheMetrics(registrar prometheus.Registerer, epoch uint64) *HeroCacheCollector { - return NewHeroCacheCollector(internal.NamespaceCollection, fmt.Sprintf("%s_%d", ResourceTransaction, epoch), registrar) + return NewHeroCacheCollector(namespaceCollection, fmt.Sprintf("%s_%d", ResourceTransaction, epoch), registrar) +} + +func FollowerCacheMetrics(registrar prometheus.Registerer) *HeroCacheCollector { + return NewHeroCacheCollector(namespaceFollowerEngine, ResourceFollowerPendingBlocksCache, registrar) } func AccessNodeExecutionDataCacheMetrics(registrar prometheus.Registerer) *HeroCacheCollector { - return NewHeroCacheCollector(internal.NamespaceAccess, ResourceExecutionDataCache, registrar) + return NewHeroCacheCollector(namespaceAccess, ResourceExecutionDataCache, registrar) +} + +// PrependPublicPrefix prepends the string "public" to the given string. +// This is used to distinguish between public and private metrics. +// Args: +// - str: the string to prepend, example: "my_metric" +// Returns: +// - the prepended string, example: "public_my_metric" +func PrependPublicPrefix(str string) string { + return fmt.Sprintf("%s_%s", "public", str) } func NewHeroCacheCollector(nameSpace string, cacheName string, registrar prometheus.Registerer) *HeroCacheCollector { diff --git a/module/metrics/hotstuff.go b/module/metrics/hotstuff.go index 9d972d96c76..df843cdeaa8 100644 --- a/module/metrics/hotstuff.go +++ b/module/metrics/hotstuff.go @@ -8,7 +8,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) // HotStuff Metrics @@ -56,8 +55,8 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { busyDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ Name: "busy_duration_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff's event loop has been busy processing one event", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -65,8 +64,8 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { idleDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "idle_duration_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff's event loop has been idle without processing any event", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -74,8 +73,8 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { waitDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{ Name: "wait_duration_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long an event has waited in the HotStuff event loop queue before being processed.", Buckets: []float64{0.05, 0.2, 0.5, 1, 2, 5}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ 
-83,56 +82,56 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { curView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "cur_view", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "the current view that the event handler has entered", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), qcView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "qc_view", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "The view of the newest known QC from HotStuff", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), tcView: promauto.NewGauge(prometheus.GaugeOpts{ Name: "tc_view", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "The view of the newest known TC from HotStuff", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), skips: promauto.NewCounter(prometheus.CounterOpts{ Name: "skips_total", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "The number of times we skipped ahead some views", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), timeouts: promauto.NewCounter(prometheus.CounterOpts{ Name: "timeouts_total", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "The number of views that this replica left due to observing a TC", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), timeoutDuration: promauto.NewGauge(prometheus.GaugeOpts{ Name: "timeout_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "The current length of the timeout", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), committeeComputationsDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "committee_computations_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff spends computing consensus committee relations", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -140,8 +139,8 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { signerComputationsDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "crypto_computations_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff spends with crypto-related operations", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -149,8 +148,8 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { validatorComputationsDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "message_validation_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: 
subsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff spends with message-validation", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, @@ -158,47 +157,47 @@ func NewHotstuffCollector(chain flow.ChainID) *HotstuffCollector { payloadProductionDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "payload_production_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long HotStuff spends with payload production", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), blockProcessingDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "block_processing_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long compliance engine processes one block", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), voteProcessingDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "vote_processing_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long VoteAggregator processes one message", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), timeoutProcessingDuration: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "timeout_object_processing_seconds", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "duration [seconds; measured with float64 precision] of how long TimeoutAggregator processes one message", Buckets: []float64{0.02, 0.05, 0.1, 0.2, 0.5, 1, 2}, ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), timeoutCollectorsRange: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "timeout_collectors_range", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "lowest and highest views that we are maintaining TimeoutCollectors for", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }, []string{"prefix"}), numberOfActiveCollectors: promauto.NewGauge(prometheus.GaugeOpts{ Name: "active_collectors", - Namespace: internal.NamespaceConsensus, - Subsystem: internal.SubsystemHotstuff, + Namespace: namespaceConsensus, + Subsystem: subsystemHotstuff, Help: "number of active TimeoutCollectors that the TimeoutAggregator component currently maintains", ConstLabels: prometheus.Labels{LabelChain: chain.String()}, }), diff --git a/module/metrics/internal/namespaces.go b/module/metrics/internal/namespaces.go deleted file mode 100644 index 75ca48d0647..00000000000 --- a/module/metrics/internal/namespaces.go +++ /dev/null @@ -1,111 +0,0 @@ -package internal - -// Prometheus metric namespaces -const ( - NamespaceNetwork = "network" - NamespaceStorage = "storage" - NamespaceAccess = "access" - NamespaceObserver = "observer" - NamespaceCollection = "collection" - 
NamespaceConsensus = "consensus" - NamespaceVerification = "verification" - NamespaceExecution = "execution" - NamespaceLoader = "loader" - NamespaceStateSync = "state_synchronization" - NamespaceExecutionDataSync = "execution_data_sync" - NamespaceChainsync = "chainsync" - NamespaceFollowerEngine = "follower" - NamespaceRestAPI = "access_rest_api" -) - -// Network subsystems represent the various layers of networking. -const ( - SubsystemLibp2p = "libp2p" - SubsystemGossip = "gossip" - SubsystemEngine = "engine" - SubsystemQueue = "queue" - SubsystemDHT = "dht" - SubsystemBitswap = "bitswap" - SubsystemAuth = "authorization" - SubsystemRateLimiting = "ratelimit" - SubsystemAlsp = "alsp" - SubsystemSecurity = "security" -) - -// Storage subsystems represent the various components of the storage layer. -const ( - SubsystemBadger = "badger" - SubsystemMempool = "mempool" - SubsystemCache = "cache" -) - -// Access subsystem -const ( - SubsystemTransactionTiming = "transaction_timing" - SubsystemTransactionSubmission = "transaction_submission" - SubsystemConnectionPool = "connection_pool" - SubsystemHTTP = "http" -) - -// Observer subsystem -const ( - SubsystemObserverGRPC = "observer_grpc" -) - -// Collection subsystem -const ( - SubsystemProposal = "proposal" -) - -// Consensus subsystems represent the different components of the consensus algorithm. -const ( - SubsystemCompliance = "compliance" - SubsystemHotstuff = "hotstuff" - SubsystemCruiseCtl = "cruisectl" - SubsystemMatchEngine = "match" -) - -// Execution Subsystems -const ( - SubsystemStateStorage = "state_storage" - SubsystemMTrie = "mtrie" - SubsystemIngestion = "ingestion" - SubsystemRuntime = "runtime" - SubsystemProvider = "provider" - SubsystemBlockDataUploader = "block_data_uploader" -) - -// Verification Subsystems -const ( - SubsystemAssignerEngine = "assigner" - SubsystemFetcherEngine = "fetcher" - SubsystemRequesterEngine = "requester" - SubsystemVerifierEngine = "verifier" - SubsystemBlockConsumer = "block_consumer" - SubsystemChunkConsumer = "chunk_consumer" -) - -// Execution Data Sync Subsystems -const ( - SubsystemExeDataRequester = "requester" - SubsystemExeDataProvider = "provider" - SubsystemExeDataPruner = "pruner" - SubsystemExecutionDataRequester = "execution_data_requester" - SubsystemExeDataBlobstore = "blobstore" -) - -// module/synchronization core -const ( - SubsystemSyncCore = "sync_core" -) - -// METRIC NAMING GUIDELINES -// Namespace: -// * If it's under a module, use the module name. eg: hotstuff, network, storage, mempool, interpreter, crypto -// * If it's a core metric from a node, use the node type. eg: consensus, verification, access -// Subsystem: -// * Subsystem is optional if the entire namespace is small enough to not be segmented further. -// * Within the component, describe the part or function referred to. 
-// Constant Labels: -// * node_role: [collection, consensus, execution, verification, access] -// * beta_metric: true diff --git a/module/metrics/libp2p_resource_manager.go b/module/metrics/libp2p_resource_manager.go index c8ae59deb13..4effd90d5e5 100644 --- a/module/metrics/libp2p_resource_manager.go +++ b/module/metrics/libp2p_resource_manager.go @@ -11,9 +11,8 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/rs/zerolog" - "github.com/onflow/flow-go/module/metrics/internal" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" ) type LibP2PResourceManagerMetrics struct { @@ -49,8 +48,8 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP l := &LibP2PResourceManagerMetrics{logger: logger, prefix: prefix} l.allowConnectionCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_allow_connection_total", Help: "total number of connections allowed by the libp2p resource manager", @@ -58,8 +57,8 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP }, []string{LabelConnectionDirection, LabelConnectionUseFD}) l.blockConnectionCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_block_connection_total", Help: "total number of connections blocked by the libp2p resource manager", @@ -67,22 +66,22 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP }, []string{LabelConnectionDirection, LabelConnectionUseFD}) l.allowStreamCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_allow_stream_total", Help: "total number of streams allowed by the libp2p resource manager", }, []string{LabelConnectionDirection}) l.blockStreamCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_block_stream_total", Help: "total number of streams blocked by the libp2p resource manager", }, []string{LabelConnectionDirection}) l.allowPeerCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_allow_peer_total", Help: "total number of remote peers allowed by the libp2p resource manager to attach to their relevant incoming/outgoing streams", }) @@ -90,22 +89,22 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP // Note: this is a lower-level metric than blockProtocolPeerCount. // This metric is incremented when a peer is blocked by the libp2p resource manager on attaching as one end of a stream (on any protocol). 
l.blockPeerCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_block_peer_total", Help: "total number of remote peers blocked by the libp2p resource manager from attaching to their relevant incoming/outgoing streams", }) l.allowProtocolCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_allow_protocol_total", Help: "total number of protocols allowed by the libp2p resource manager to attach to their relevant incoming/outgoing streams", }) l.blockProtocolCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_block_protocol_total", Help: "total number of protocols blocked by the libp2p resource manager from attaching to their relevant incoming/outgoing streams", }) @@ -113,22 +112,22 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP // Note: this is a higher level metric than blockPeerCount and blockProtocolCount. // This metric is incremented when a peer is already attached as one end of a stream but on a different reserved protocol. l.blockProtocolPeerCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_block_protocol_peer_total", Help: "total number of remote peers blocked by the libp2p resource manager from attaching to their relevant incoming/outgoing streams on a specific protocol", }) l.allowServiceCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_allow_service_total", Help: "total number of remote services (e.g., ping, relay) allowed by the libp2p resource manager to attach to their relevant incoming/outgoing streams", }) l.blockServiceCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_block_service_total", Help: "total number of remote services (e.g., ping, relay) blocked by the libp2p resource manager from attaching to their relevant incoming/outgoing streams", }) @@ -136,23 +135,23 @@ func NewLibP2PResourceManagerMetrics(logger zerolog.Logger, prefix string) *LibP // Note: this is a higher level metric than blockServiceCount and blockPeerCount. // This metric is incremented when a service is already attached as one end of a stream but on a different reserved protocol. 
l.blockServicePeerCount = promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_block_service_peer_total", Help: "total number of remote services (e.g., ping, relay) blocked by the libp2p resource manager from attaching to their relevant incoming/outgoing streams on a specific peer", }) l.allowMemoryHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_allowed_memory_bytes", Help: "size of memory allocation requests allowed by the libp2p resource manager", Buckets: []float64{KiB, 10 * KiB, 100 * KiB, 500 * KiB, 1 * MiB, 10 * MiB, 100 * MiB, 500 * MiB, 1 * GiB}, }) l.blockMemoryHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemLibp2p, + Namespace: namespaceNetwork, + Subsystem: subsystemLibp2p, Name: l.prefix + "resource_manager_blocked_memory_bytes", Help: "size of memory allocation requests blocked by the libp2p resource manager", Buckets: []float64{KiB, 10 * KiB, 100 * KiB, 500 * KiB, 1 * MiB, 10 * MiB, 100 * MiB, 500 * MiB, 1 * GiB}, diff --git a/module/metrics/loader.go b/module/metrics/loader.go index da7c37cce9d..342dc99b845 100644 --- a/module/metrics/loader.go +++ b/module/metrics/loader.go @@ -5,8 +5,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - - "github.com/onflow/flow-go/module/metrics/internal" ) type LoaderCollector struct { @@ -23,27 +21,27 @@ func NewLoaderCollector() *LoaderCollector { cc := &LoaderCollector{ transactionsSent: promauto.NewCounter(prometheus.CounterOpts{ Name: "transactions_sent", - Namespace: internal.NamespaceLoader, + Namespace: namespaceLoader, Help: "transactions sent by the loader", }), transactionsLost: promauto.NewCounter(prometheus.CounterOpts{ Name: "transactions_lost", - Namespace: internal.NamespaceLoader, + Namespace: namespaceLoader, Help: "transaction that took too long to return", }), tpsConfigured: promauto.NewGauge(prometheus.GaugeOpts{ Name: "transactions_per_second_configured", - Namespace: internal.NamespaceLoader, + Namespace: namespaceLoader, Help: "transactions per second that the loader should send", }), transactionsExecuted: promauto.NewCounter(prometheus.CounterOpts{ Name: "transactions_executed", - Namespace: internal.NamespaceLoader, + Namespace: namespaceLoader, Help: "transaction successfully executed by the loader", }), tteInSeconds: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "transactions_executed_in_seconds", - Namespace: internal.NamespaceLoader, + Namespace: namespaceLoader, Help: "Time To Execute histogram for transactions (in seconds)", Buckets: prometheus.ExponentialBuckets(2, 2, 8), }), diff --git a/module/metrics/mempool.go b/module/metrics/mempool.go index a1aea742c8e..7209af9be70 100644 --- a/module/metrics/mempool.go +++ b/module/metrics/mempool.go @@ -9,7 +9,6 @@ import ( "github.com/onflow/flow-go/engine" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) type MempoolCollector struct { @@ -30,8 +29,8 @@ func NewMempoolCollector(interval time.Duration) *MempoolCollector { entries: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "entries_total", - Namespace: 
internal.NamespaceStorage, - Subsystem: internal.SubsystemMempool, + Namespace: namespaceStorage, + Subsystem: subsystemMempool, Help: "the number of entries in the mempool", }, []string{LabelResource}), } diff --git a/module/metrics/namespaces.go b/module/metrics/namespaces.go new file mode 100644 index 00000000000..f89f2a530ae --- /dev/null +++ b/module/metrics/namespaces.go @@ -0,0 +1,111 @@ +package metrics + +// Prometheus metric namespaces +const ( + namespaceNetwork = "network" + namespaceStorage = "storage" + namespaceAccess = "access" + namespaceObserver = "observer" + namespaceCollection = "collection" + namespaceConsensus = "consensus" + namespaceVerification = "verification" + namespaceExecution = "execution" + namespaceLoader = "loader" + namespaceStateSync = "state_synchronization" + namespaceExecutionDataSync = "execution_data_sync" + namespaceChainsync = "chainsync" + namespaceFollowerEngine = "follower" + namespaceRestAPI = "access_rest_api" +) + +// Network subsystems represent the various layers of networking. +const ( + subsystemLibp2p = "libp2p" + subsystemGossip = "gossip" + subsystemEngine = "engine" + subsystemQueue = "queue" + subsystemDHT = "dht" + subsystemBitswap = "bitswap" + subsystemAuth = "authorization" + subsystemRateLimiting = "ratelimit" + subsystemAlsp = "alsp" + subsystemSecurity = "security" +) + +// Storage subsystems represent the various components of the storage layer. +const ( + subsystemBadger = "badger" + subsystemMempool = "mempool" + subsystemCache = "cache" +) + +// Access subsystem +const ( + subsystemTransactionTiming = "transaction_timing" + subsystemTransactionSubmission = "transaction_submission" + subsystemConnectionPool = "connection_pool" + subsystemHTTP = "http" +) + +// Observer subsystem +const ( + subsystemObserverGRPC = "observer_grpc" +) + +// Collection subsystem +const ( + subsystemProposal = "proposal" +) + +// Consensus subsystems represent the different components of the consensus algorithm. +const ( + subsystemCompliance = "compliance" + subsystemHotstuff = "hotstuff" + subsystemCruiseCtl = "cruisectl" + subsystemMatchEngine = "match" +) + +// Execution Subsystems +const ( + subsystemStateStorage = "state_storage" + subsystemMTrie = "mtrie" + subsystemIngestion = "ingestion" + subsystemRuntime = "runtime" + subsystemProvider = "provider" + subsystemBlockDataUploader = "block_data_uploader" +) + +// Verification Subsystems +const ( + subsystemAssignerEngine = "assigner" + subsystemFetcherEngine = "fetcher" + subsystemRequesterEngine = "requester" + subsystemVerifierEngine = "verifier" + subsystemBlockConsumer = "block_consumer" + subsystemChunkConsumer = "chunk_consumer" +) + +// Execution Data Sync Subsystems +const ( + subsystemExeDataRequester = "requester" + subsystemExeDataProvider = "provider" + subsystemExeDataPruner = "pruner" + subsystemExecutionDataRequester = "execution_data_requester" + subsystemExeDataBlobstore = "blobstore" +) + +// module/synchronization core +const ( + subsystemSyncCore = "sync_core" +) + +// METRIC NAMING GUIDELINES +// Namespace: +// * If it's under a module, use the module name. eg: hotstuff, network, storage, mempool, interpreter, crypto +// * If it's a core metric from a node, use the node type. eg: consensus, verification, access +// Subsystem: +// * Subsystem is optional if the entire namespace is small enough to not be segmented further. +// * Within the component, describe the part or function referred to. 
+// Constant Labels: +// * node_role: [collection, consensus, execution, verification, access] +// * beta_metric: true diff --git a/module/metrics/network.go b/module/metrics/network.go index 27efce5e4c0..af9359fef21 100644 --- a/module/metrics/network.go +++ b/module/metrics/network.go @@ -10,9 +10,8 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" ) const ( @@ -84,8 +83,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.outboundMessageSize = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: nc.prefix + "outbound_message_size_bytes", Help: "size of the outbound network message", Buckets: []float64{KiB, 100 * KiB, 500 * KiB, 1 * MiB, 2 * MiB, 4 * MiB}, @@ -94,8 +93,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.inboundMessageSize = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: nc.prefix + "inbound_message_size_bytes", Help: "size of the inbound network message", Buckets: []float64{KiB, 100 * KiB, 500 * KiB, 1 * MiB, 2 * MiB, 4 * MiB}, @@ -104,8 +103,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.duplicateMessagesDropped = promauto.NewCounterVec( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: nc.prefix + "duplicate_messages_dropped", Help: "number of duplicate messages dropped", }, []string{LabelChannel, LabelProtocol, LabelMessage}, @@ -113,8 +112,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.dnsLookupDuration = promauto.NewHistogram( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: nc.prefix + "dns_lookup_duration_ms", Buckets: []float64{1, 10, 100, 500, 1000, 2000}, Help: "the time spent on resolving a dns lookup (including cache hits)", @@ -123,8 +122,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.dnsCacheMissCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: nc.prefix + "dns_cache_miss_total", Help: "the number of dns lookups that miss the cache and are made through the network", }, @@ -132,8 +131,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.dnsCacheInvalidationCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: nc.prefix + "dns_cache_invalidation_total", Help: "the number of times dns cache is invalidated for an entry", }, @@ -141,8 +140,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.dnsCacheHitCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: 
internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: nc.prefix + "dns_cache_hit_total", Help: "the number of dns cache hits", }, @@ -150,8 +149,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.dnsLookupRequestDroppedCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: nc.prefix + "dns_lookup_requests_dropped_total", Help: "the number of dns lookup requests dropped", }, @@ -159,8 +158,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.queueSize = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemQueue, + Namespace: namespaceNetwork, + Subsystem: subsystemQueue, Name: nc.prefix + "message_queue_size", Help: "the number of elements in the message receive queue", }, []string{LabelPriority}, @@ -168,8 +167,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.queueDuration = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemQueue, + Namespace: namespaceNetwork, + Subsystem: subsystemQueue, Name: nc.prefix + "message_queue_duration_seconds", Help: "duration [seconds; measured with float64 precision] of how long a message spent in the queue before being delivered to an engine.", Buckets: []float64{0.01, 0.1, 0.5, 1, 2, 5}, // 10ms, 100ms, 500ms, 1s, 2s, 5s @@ -178,8 +177,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.numMessagesProcessing = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemQueue, + Namespace: namespaceNetwork, + Subsystem: subsystemQueue, Name: nc.prefix + "current_messages_processing", Help: "the number of messages currently being processed", }, []string{LabelChannel}, @@ -187,8 +186,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.numDirectMessagesSending = promauto.NewGaugeVec( prometheus.GaugeOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: nc.prefix + "direct_messages_in_progress", Help: "the number of direct messages currently in the process of sending", }, []string{LabelChannel}, @@ -196,8 +195,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.inboundProcessTime = promauto.NewCounterVec( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemQueue, + Namespace: namespaceNetwork, + Subsystem: subsystemQueue, Name: nc.prefix + "engine_message_processing_time_seconds", Help: "duration [seconds; measured with float64 precision] of how long a queue worker blocked for an engine processing a message", }, []string{LabelChannel}, @@ -205,8 +204,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.outboundConnectionCount = promauto.NewGauge( prometheus.GaugeOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemQueue, + Namespace: namespaceNetwork, + Subsystem: subsystemQueue, Name: nc.prefix + "outbound_connection_count", Help: "the number of outbound connections of this node", }, @@ -214,8 +213,8 @@ func NewNetworkCollector(logger 
zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.inboundConnectionCount = promauto.NewGauge( prometheus.GaugeOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemQueue, + Namespace: namespaceNetwork, + Subsystem: subsystemQueue, Name: nc.prefix + "inbound_connection_count", Help: "the number of inbound connections of this node", }, @@ -224,16 +223,16 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.routingTableSize = promauto.NewGauge( prometheus.GaugeOpts{ Name: nc.prefix + "routing_table_size", - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemDHT, + Namespace: namespaceNetwork, + Subsystem: subsystemDHT, Help: "the size of the DHT routing table", }, ) nc.unAuthorizedMessagesCount = promauto.NewCounterVec( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemAuth, + Namespace: namespaceNetwork, + Subsystem: subsystemAuth, Name: nc.prefix + "unauthorized_messages_count", Help: "number of messages that failed authorization validation", }, []string{LabelNodeRole, LabelMessage, LabelChannel, LabelViolationReason}, @@ -241,8 +240,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.rateLimitedUnicastMessagesCount = promauto.NewCounterVec( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemRateLimiting, + Namespace: namespaceNetwork, + Subsystem: subsystemRateLimiting, Name: nc.prefix + "rate_limited_unicast_messages_count", Help: "number of messages sent via unicast that have been rate limited", }, []string{LabelNodeRole, LabelMessage, LabelChannel, LabelRateLimitReason}, @@ -250,8 +249,8 @@ func NewNetworkCollector(logger zerolog.Logger, opts ...NetworkCollectorOpt) *Ne nc.violationReportSkippedCount = promauto.NewCounter( prometheus.CounterOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemSecurity, + Namespace: namespaceNetwork, + Subsystem: subsystemSecurity, Name: nc.prefix + "slashing_violation_reports_skipped_count", Help: "number of slashing violations consumer violations that were not reported for misbehavior because the identity of the sender was not known", }, diff --git a/module/metrics/network/herocache.go b/module/metrics/network/herocache.go deleted file mode 100644 index d1c0744ea3a..00000000000 --- a/module/metrics/network/herocache.go +++ /dev/null @@ -1,131 +0,0 @@ -package networkmetrics - -import ( - "fmt" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/metrics/internal" - "github.com/onflow/flow-go/network" -) - -func NetworkReceiveCacheMetricsFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - r := metrics.ResourceNetworkingReceiveCache - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(internal.NamespaceNetwork, r) -} - -// DisallowListCacheMetricsFactory is the factory method for creating a new HeroCacheCollector for the disallow list cache. -// The disallow-list cache is used to keep track of peers that are disallow-listed and the reasons for it. 
-// Args: -// - f: the HeroCacheMetricsFactory to create the collector -// - networkingType: the networking type of the cache, i.e., whether it is used for the public or the private network -// Returns: -// - a HeroCacheMetrics for the disallow list cache -func DisallowListCacheMetricsFactory(f metrics.HeroCacheMetricsFactory, networkingType network.NetworkingType) module.HeroCacheMetrics { - r := metrics.ResourceNetworkingDisallowListCache - if networkingType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(internal.NamespaceNetwork, r) -} - -func NetworkDnsTxtCacheMetricsFactory(registrar prometheus.Registerer) *metrics.HeroCacheCollector { - return metrics.NewHeroCacheCollector(internal.NamespaceNetwork, metrics.ResourceNetworkingDnsTxtCache, registrar) -} - -func NetworkDnsIpCacheMetricsFactory(registrar prometheus.Registerer) *metrics.HeroCacheCollector { - return metrics.NewHeroCacheCollector(internal.NamespaceNetwork, metrics.ResourceNetworkingDnsIpCache, registrar) -} - -func DisallowListNotificationQueueMetricFactory(registrar prometheus.Registerer) *metrics.HeroCacheCollector { - return metrics.NewHeroCacheCollector(internal.NamespaceNetwork, metrics.ResourceNetworkingDisallowListNotificationQueue, registrar) -} - -func ApplicationLayerSpamRecordCacheMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - r := metrics.ResourceNetworkingApplicationLayerSpamRecordCache - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - - return f(internal.NamespaceNetwork, r) -} - -func ApplicationLayerSpamRecordQueueMetricsFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - r := metrics.ResourceNetworkingApplicationLayerSpamReportQueue - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(internal.NamespaceNetwork, r) -} - -func GossipSubRPCMetricsObserverInspectorQueueMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. - r := metrics.ResourceNetworkingRpcMetricsObserverInspectorQueue - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(internal.NamespaceNetwork, r) -} - -func GossipSubRPCInspectorQueueMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. - r := metrics.ResourceNetworkingRpcValidationInspectorQueue - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(internal.NamespaceNetwork, r) -} - -func GossipSubRPCSentTrackerMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. - r := metrics.ResourceNetworkingRPCSentTrackerCache - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(internal.NamespaceNetwork, r) -} - -func GossipSubRPCSentTrackerQueueMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. 
- r := metrics.ResourceNetworkingRPCSentTrackerQueue - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(internal.NamespaceNetwork, r) -} - -func RpcInspectorNotificationQueueMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - r := metrics.ResourceNetworkingRpcInspectorNotificationQueue - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(internal.NamespaceNetwork, r) -} - -func GossipSubRPCInspectorClusterPrefixedCacheMetricFactory(f metrics.HeroCacheMetricsFactory, networkType network.NetworkingType) module.HeroCacheMetrics { - // we don't use the public prefix for the metrics here for sake of backward compatibility of metric name. - r := metrics.ResourceNetworkingRpcClusterPrefixReceivedCache - if networkType == network.PublicNetwork { - r = PrependPublicPrefix(r) - } - return f(internal.NamespaceNetwork, r) -} - -func FollowerCacheMetrics(registrar prometheus.Registerer) *metrics.HeroCacheCollector { - return metrics.NewHeroCacheCollector(internal.NamespaceFollowerEngine, metrics.ResourceFollowerPendingBlocksCache, registrar) -} - -// PrependPublicPrefix prepends the string "public" to the given string. -// This is used to distinguish between public and private metrics. -// Args: -// - str: the string to prepend, example: "my_metric" -// Returns: -// - the prepended string, example: "public_my_metric" -func PrependPublicPrefix(str string) string { - return fmt.Sprintf("%s_%s", "public", str) -} diff --git a/module/metrics/observer.go b/module/metrics/observer.go index 3aa2a9d684a..375aa66a2ac 100644 --- a/module/metrics/observer.go +++ b/module/metrics/observer.go @@ -4,8 +4,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "google.golang.org/grpc/codes" - - "github.com/onflow/flow-go/module/metrics/internal" ) type ObserverMetrics interface { @@ -21,8 +19,8 @@ var _ ObserverMetrics = (*ObserverCollector)(nil) func NewObserverCollector() *ObserverCollector { return &ObserverCollector{ rpcs: promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: internal.NamespaceObserver, - Subsystem: internal.SubsystemObserverGRPC, + Namespace: namespaceObserver, + Subsystem: subsystemObserverGRPC, Name: "handler_grpc_counter", Help: "tracking error/success rate of each rpc for the observer service", }, []string{"handler", "grpc_method", "grpc_code"}), diff --git a/module/metrics/ping.go b/module/metrics/ping.go index cfb5f5597fa..2bc20f42c82 100644 --- a/module/metrics/ping.go +++ b/module/metrics/ping.go @@ -7,7 +7,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/module/metrics/internal" ) type PingCollector struct { @@ -20,20 +19,20 @@ func NewPingCollector() *PingCollector { pc := &PingCollector{ reachable: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "node_reachable", - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Help: "report whether a node is reachable", }, []string{LabelNodeID, LabelNodeAddress, LabelNodeRole, LabelNodeInfo}), sealedHeight: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "sealed_height", - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Help: "the last sealed height of a node", }, 
[]string{LabelNodeID, LabelNodeAddress, LabelNodeRole, LabelNodeInfo, LabelNodeVersion}), hotstuffCurView: promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "hotstuff_curview", - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Help: "the hotstuff current view", }, []string{LabelNodeID, LabelNodeAddress, LabelNodeInfo}), } diff --git a/module/metrics/rate_limited_blockstore.go b/module/metrics/rate_limited_blockstore.go index 707b1b70ca9..daebafd6827 100644 --- a/module/metrics/rate_limited_blockstore.go +++ b/module/metrics/rate_limited_blockstore.go @@ -5,7 +5,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) type RateLimitedBlockstoreCollector struct { @@ -15,8 +14,8 @@ type RateLimitedBlockstoreCollector struct { func NewRateLimitedBlockstoreCollector(prefix string) module.RateLimitedBlockstoreMetrics { return &RateLimitedBlockstoreCollector{ bytesRead: promauto.NewCounter(prometheus.CounterOpts{ - Namespace: internal.NamespaceStateSync, - Subsystem: internal.SubsystemExeDataBlobstore, + Namespace: namespaceStateSync, + Subsystem: subsystemExeDataBlobstore, Name: prefix + "_bytes_read", Help: "number of bytes read from the blockstore", }), diff --git a/module/metrics/rest_api.go b/module/metrics/rest_api.go index f112cf22261..e9132f243c6 100644 --- a/module/metrics/rest_api.go +++ b/module/metrics/rest_api.go @@ -9,7 +9,6 @@ import ( httpmetrics "github.com/slok/go-http-metrics/metrics" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) type RestCollector struct { @@ -34,31 +33,31 @@ func NewRestCollector(urlToRouteMapper func(string) (string, error), registerer r := &RestCollector{ urlToRouteMapper: urlToRouteMapper, httpRequestDurHistogram: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: internal.NamespaceRestAPI, - Subsystem: internal.SubsystemHTTP, + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "request_duration_seconds", Help: "The latency of the HTTP requests.", Buckets: prometheus.DefBuckets, }, []string{LabelService, LabelHandler, LabelMethod, LabelStatusCode}), httpResponseSizeHistogram: prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: internal.NamespaceRestAPI, - Subsystem: internal.SubsystemHTTP, + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "response_size_bytes", Help: "The size of the HTTP responses.", Buckets: prometheus.ExponentialBuckets(100, 10, 8), }, []string{LabelService, LabelHandler, LabelMethod, LabelStatusCode}), httpRequestsInflight: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: internal.NamespaceRestAPI, - Subsystem: internal.SubsystemHTTP, + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "requests_inflight", Help: "The number of inflight requests being handled at the same time.", }, []string{LabelService, LabelHandler}), httpRequestsTotal: prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: internal.NamespaceRestAPI, - Subsystem: internal.SubsystemHTTP, + Namespace: namespaceRestAPI, + Subsystem: subsystemHTTP, Name: "requests_total", Help: "The number of requests handled over time.", }, []string{LabelMethod, LabelHandler}), diff --git a/module/metrics/transaction.go b/module/metrics/transaction.go index 5757b6079a1..50fca53bf39 100644 --- a/module/metrics/transaction.go +++ b/module/metrics/transaction.go @@ -10,7 
+10,6 @@ import (
 	"github.com/onflow/flow-go/model/flow"
 	"github.com/onflow/flow-go/module"
 	"github.com/onflow/flow-go/module/mempool"
-	"github.com/onflow/flow-go/module/metrics/internal"
 )
 
 type TransactionCollector struct {
@@ -51,8 +50,8 @@ func NewTransactionCollector(
 		logTimeToFinalizedExecuted: logTimeToFinalizedExecuted,
 		timeToFinalized: promauto.NewSummary(prometheus.SummaryOpts{
 			Name: "time_to_finalized_seconds",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemTransactionTiming,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemTransactionTiming,
 			Help: "the duration from when the transaction was received until it was finalized",
 			Objectives: map[float64]float64{
 				0.01: 0.001,
@@ -65,8 +64,8 @@ func NewTransactionCollector(
 		}),
 		timeToExecuted: promauto.NewSummary(prometheus.SummaryOpts{
 			Name: "time_to_executed_seconds",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemTransactionTiming,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemTransactionTiming,
 			Help: "the duration from when the transaction was received until it was executed",
 			Objectives: map[float64]float64{
 				0.01: 0.001,
@@ -79,8 +78,8 @@ func NewTransactionCollector(
 		}),
 		timeToFinalizedExecuted: promauto.NewSummary(prometheus.SummaryOpts{
 			Name: "time_to_finalized_executed_seconds",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemTransactionTiming,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemTransactionTiming,
 			Help: "the duration from when the transaction was received until it was both " +
 				"finalized and executed",
 			Objectives: map[float64]float64{
@@ -94,46 +93,46 @@ func NewTransactionCollector(
 		}),
 		transactionSubmission: promauto.NewCounterVec(prometheus.CounterOpts{
 			Name: "transaction_submission",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemTransactionSubmission,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemTransactionSubmission,
 			Help: "counter for the success/failure of transaction submissions",
 		}, []string{"result"}),
 		scriptExecutedDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{
 			Name: "script_executed_duration",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemTransactionSubmission,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemTransactionSubmission,
 			Help: "histogram for the duration in ms of the round trip time for executing a script",
 			Buckets: []float64{1, 100, 500, 1000, 2000, 5000},
 		}, []string{"script_size"}),
 		scriptExecutionErrorOnExecutor: promauto.NewCounterVec(prometheus.CounterOpts{
 			Name: "script_execution_error_archive",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemTransactionSubmission,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemTransactionSubmission,
 			Help: "counter for the internal errors for executing a script for a block on the archive node",
 		}, []string{"source"}),
 		scriptExecutionComparison: promauto.NewCounterVec(prometheus.CounterOpts{
 			Name: "script_execution_comparison",
-			Namespace: internal.NamespaceAccess,
-			Subsystem: internal.SubsystemTransactionSubmission,
+			Namespace: namespaceAccess,
+			Subsystem: subsystemTransactionSubmission,
 			Help: "counter for the comparison outcomes of executing a script on the archive and execution node",
 		}, []string{"outcome"}),
 		transactionResultDuration: promauto.NewHistogramVec(prometheus.HistogramOpts{
 			Name: "transaction_result_fetched_duration",
-			Namespace: internal.NamespaceAccess,
-			Subsystem:
internal.SubsystemTransactionSubmission, + Namespace: namespaceAccess, + Subsystem: subsystemTransactionSubmission, Help: "histogram for the duration in ms of the round trip time for getting a transaction result", Buckets: []float64{1, 100, 500, 1000, 2000, 5000}, }, []string{"payload_size"}), scriptSize: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "script_size", - Namespace: internal.NamespaceAccess, - Subsystem: internal.SubsystemTransactionSubmission, + Namespace: namespaceAccess, + Subsystem: subsystemTransactionSubmission, Help: "histogram for the script size in kb of scripts used in ExecuteScript", }), transactionSize: promauto.NewHistogram(prometheus.HistogramOpts{ Name: "transaction_size", - Namespace: internal.NamespaceAccess, - Subsystem: internal.SubsystemTransactionSubmission, + Namespace: namespaceAccess, + Subsystem: subsystemTransactionSubmission, Help: "histogram for the transaction size in kb of scripts used in GetTransactionResult", }), } diff --git a/module/metrics/unicast_manager.go b/module/metrics/unicast_manager.go index 2e1147ff7a5..f790996d490 100644 --- a/module/metrics/unicast_manager.go +++ b/module/metrics/unicast_manager.go @@ -7,7 +7,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/metrics/internal" ) // UnicastManagerMetrics metrics collector for the unicast manager. @@ -35,8 +34,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { uc.createStreamRetriesDueToDialBackoff = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: uc.prefix + "attempts_to_create_stream_due_to_in_progress_dial_total", Help: "the number of times a stream creation is retried due to a dial in progress", Buckets: []float64{1, 2, 3}, @@ -45,8 +44,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { uc.createStreamTimeDueToDialBackoff = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: uc.prefix + "overall_time_to_create_stream_seconds", Help: "the amount of time it takes to create a stream successfully in seconds including the time to create a connection when needed", Buckets: []float64{0.01, 0.1, 0.5, 1, 2, 5}, @@ -55,8 +54,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { uc.dialPeerRetries = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: uc.prefix + "attempts_to_dial_peer_total", Help: "number of retry attempts before a connection is established successfully", Buckets: []float64{1, 2, 3}, @@ -65,8 +64,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { uc.dialPeerTime = promauto.NewHistogramVec( prometheus.HistogramOpts{ - Namespace: internal.NamespaceNetwork, - Subsystem: internal.SubsystemGossip, + Namespace: namespaceNetwork, + Subsystem: subsystemGossip, Name: uc.prefix + "time_to_dial_peer_seconds", Help: "the amount of time it takes to dial a peer and establish a connection during stream creation", Buckets: []float64{0.01, 0.1, 0.5, 1, 2, 5}, @@ -75,8 +74,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics { 
 	uc.createStreamOnConnRetries = promauto.NewHistogramVec(
 		prometheus.HistogramOpts{
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemGossip,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemGossip,
 			Name: uc.prefix + "attempts_to_create_stream_on_connection_total",
 			Help: "number of retry attempts before a stream is created on the available connection between two peers",
 			Buckets: []float64{1, 2, 3},
@@ -85,8 +84,8 @@ func NewUnicastManagerMetrics(prefix string) *UnicastManagerMetrics {
 	uc.createStreamOnConnTime = promauto.NewHistogramVec(
 		prometheus.HistogramOpts{
-			Namespace: internal.NamespaceNetwork,
-			Subsystem: internal.SubsystemGossip,
+			Namespace: namespaceNetwork,
+			Subsystem: subsystemGossip,
 			Name: uc.prefix + "time_to_create_stream_on_connection_seconds",
 			Help: "the amount of time it takes to create a stream on the available connection between two peers",
 			Buckets: []float64{0.01, 0.1, 0.5, 1, 2, 5},
diff --git a/module/metrics/verification.go b/module/metrics/verification.go
index f980eb77312..b89d2bc1584 100644
--- a/module/metrics/verification.go
+++ b/module/metrics/verification.go
@@ -4,7 +4,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 
 	"github.com/onflow/flow-go/module"
-	"github.com/onflow/flow-go/module/metrics/internal"
 )
 
 type VerificationCollector struct {
@@ -49,109 +48,109 @@ func NewVerificationCollector(tracer module.Tracer, registerer prometheus.Regist
 	// Job Consumers
 	lastProcessedBlockJobIndexBlockConsumer := prometheus.NewGauge(prometheus.GaugeOpts{
 		Name: "last_processed_block_job_index",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemBlockConsumer,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemBlockConsumer,
 		Help: "the last block job index processed by block consumer",
 	})
 
 	lastProcessedChunkJobIndexChunkConsumer := prometheus.NewGauge(prometheus.GaugeOpts{
 		Name: "last_processed_chunk_job_index",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemChunkConsumer,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemChunkConsumer,
 		Help: "the last chunk job index processed by chunk consumer",
 	})
 
 	// Assigner Engine
 	receivedFinalizedHeightAssigner := prometheus.NewGauge(prometheus.GaugeOpts{
 		Name: "finalized_height",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemAssignerEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemAssignerEngine,
 		Help: "the last finalized height received by assigner engine",
 	})
 
 	receivedResultsTotalAssigner := prometheus.NewGauge(prometheus.GaugeOpts{
 		Name: "received_result_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemAssignerEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemAssignerEngine,
 		Help: "total number of execution results received by assigner engine",
 	})
 
 	assignedChunksTotalAssigner := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "chunk_assigned_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemAssignerEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemAssignerEngine,
 		Help: "total number of chunks assigned to verification node",
 	})
 
 	sentChunksTotalAssigner := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "processed_chunk_sent_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemAssignerEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemAssignerEngine,
 		Help: "total number of chunks sent by assigner engine to chunk consumer",
 	})
 
 	// Fetcher Engine
 	receivedAssignedChunksTotalFetcher := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "assigned_chunk_received_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemFetcherEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemFetcherEngine,
 		Help: "total number of chunks received by fetcher engine from assigner engine through chunk consumer",
 	})
 
 	// Requester Engine
 	receivedChunkDataPackRequestsTotalRequester := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "chunk_data_pack_request_received_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemRequesterEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemRequesterEngine,
 		Help: "total number of chunk data pack requests received by requester engine from fetcher engine",
 	})
 
 	sentChunkDataRequestMessagesTotalRequester := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "chunk_data_pack_request_message_sent_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemRequesterEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemRequesterEngine,
 		Help: "total number of chunk data pack request messages sent in the network by requester engine",
 	})
 
 	receivedChunkDataResponseMessagesTotalRequester := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "chunk_data_response_message_received_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemRequesterEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemRequesterEngine,
 		Help: "total number of chunk data response messages received from network by requester engine",
 	})
 
 	sentChunkDataPackByRequesterTotal := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "chunk_data_pack_sent_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemRequesterEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemRequesterEngine,
 		Help: "total number of chunk data packs sent by requester engine to fetcher engine",
 	})
 
 	sentVerifiableChunksTotalFetcher := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "verifiable_chunk_sent_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemFetcherEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemFetcherEngine,
 		Help: "total number of verifiable chunks sent by fetcher engine to verifier engine",
 	})
 
 	receivedChunkDataPackTotalFetcher := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "chunk_data_pack_received_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemFetcherEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemFetcherEngine,
 		Help: "total number of chunk data packs received by fetcher engine",
 	})
 
 	requestedChunkDataPackTotalFetcher := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "chunk_data_pack_requested_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemFetcherEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemFetcherEngine,
 		Help: "total number of chunk data packs requested by fetcher engine",
 	})
 
 	maxChunkDataPackRequestAttemptForNextUnsealedHeight := prometheus.NewGauge(prometheus.GaugeOpts{
 		Name: "next_unsealed_height_max_chunk_data_pack_request_attempt_times",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemRequesterEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemRequesterEngine,
 		// an indicator for when execution nodes are unresponsive to chunk data pack requests,
 		// in which case the verification node will keep requesting the chunk data pack, and this
 		// metric's value will go up.
@@ -162,15 +161,15 @@ func NewVerificationCollector(tracer module.Tracer, registerer prometheus.Regist
 	// Verifier Engine
 	receivedVerifiableChunksTotalVerifier := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "verifiable_chunk_received_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemVerifierEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemVerifierEngine,
 		Help: "total number of verifiable chunks received by verifier engine from fetcher engine",
 	})
 
 	sentResultApprovalTotalVerifier := prometheus.NewCounter(prometheus.CounterOpts{
 		Name: "result_approvals_total",
-		Namespace: internal.NamespaceVerification,
-		Subsystem: internal.SubsystemVerifierEngine,
+		Namespace: namespaceVerification,
+		Subsystem: subsystemVerifierEngine,
 		Help: "total number of emitted result approvals by verifier engine",
 	})
 
diff --git a/module/state_synchronization/execution_data_requester.go b/module/state_synchronization/execution_data_requester.go
index 2a022a160f6..dd479455698 100644
--- a/module/state_synchronization/execution_data_requester.go
+++ b/module/state_synchronization/execution_data_requester.go
@@ -3,11 +3,11 @@ package state_synchronization
 import (
 	"github.com/onflow/flow-go/consensus/hotstuff/model"
 	"github.com/onflow/flow-go/module/component"
-	model2 "github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 )
 
 // OnExecutionDataReceivedConsumer is a callback that is called when ExecutionData is received for a new block
-type OnExecutionDataReceivedConsumer func(*model2.BlockExecutionDataEntity)
+type OnExecutionDataReceivedConsumer func(*execution_data.BlockExecutionDataEntity)
 
 // ExecutionDataRequester is a component that syncs ExecutionData from the network, and exposes
 // a callback that is called when a new ExecutionData is received
diff --git a/module/state_synchronization/requester/distributer.go b/module/state_synchronization/requester/distributer.go
index 2e0b95535e8..ded5ebb95a2 100644
--- a/module/state_synchronization/requester/distributer.go
+++ b/module/state_synchronization/requester/distributer.go
@@ -3,7 +3,7 @@ package requester
 import (
 	"sync"
 
-	"github.com/onflow/flow-go/module/executiondatasync/execution_data/model"
+	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	"github.com/onflow/flow-go/module/state_synchronization"
 )
 
@@ -27,7 +27,7 @@ func (p *ExecutionDataDistributor) AddOnExecutionDataReceivedConsumer(consumer s
 }
 
 // OnExecutionDataReceived is called when new execution data is received
-func (p *ExecutionDataDistributor) OnExecutionDataReceived(executionData *model.BlockExecutionDataEntity) {
+func (p *ExecutionDataDistributor) OnExecutionDataReceived(executionData *execution_data.BlockExecutionDataEntity) {
 	p.lock.Lock()
 	defer p.lock.Unlock()
 
diff --git a/module/state_synchronization/requester/execution_data_requester.go b/module/state_synchronization/requester/execution_data_requester.go
index a622b70b44a..6cc1a828e91 100644
--- a/module/state_synchronization/requester/execution_data_requester.go
+++ b/module/state_synchronization/requester/execution_data_requester.go
@@ -17,7 +17,6 @@ import (
 	"github.com/onflow/flow-go/module/component"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
 	"github.com/onflow/flow-go/module/executiondatasync/execution_data/cache"
-	model2
"github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/jobqueue" "github.com/onflow/flow-go/module/state_synchronization" @@ -441,7 +440,7 @@ func (e *executionDataRequester) processNotificationJob(ctx irrecoverable.Signal e.metrics.NotificationSent(entry.Height) } -func (e *executionDataRequester) notifyConsumers(executionData *model2.BlockExecutionDataEntity) { +func (e *executionDataRequester) notifyConsumers(executionData *execution_data.BlockExecutionDataEntity) { e.consumerMu.RLock() defer e.consumerMu.RUnlock() diff --git a/module/state_synchronization/requester/execution_data_requester_test.go b/module/state_synchronization/requester/execution_data_requester_test.go index 37fe1dac1a3..5ac29329094 100644 --- a/module/state_synchronization/requester/execution_data_requester_test.go +++ b/module/state_synchronization/requester/execution_data_requester_test.go @@ -25,7 +25,6 @@ import ( "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" exedatamock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" - model2 "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" @@ -76,7 +75,7 @@ type testExecutionDataServiceEntry struct { // When set (and fn is unset), this error will be returned for any calls to Get for this ED Err error // Otherwise, the execution data will be returned directly with no error - ExecutionData *model2.BlockExecutionData + ExecutionData *execution_data.BlockExecutionData } type specialBlockGenerator func(int) map[uint64]testExecutionDataCallback @@ -86,12 +85,12 @@ type edTestRun struct { specialBlocks specialBlockGenerator } -type testExecutionDataCallback func(*model2.BlockExecutionData) (*model2.BlockExecutionData, error) +type testExecutionDataCallback func(*execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) func mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) *exedatamock.Downloader { downloader := new(exedatamock.Downloader) - get := func(id flow.Identifier) (*model2.BlockExecutionData, error) { + get := func(id flow.Identifier) (*execution_data.BlockExecutionData, error) { ed, has := edStore[id] // return not found @@ -115,7 +114,7 @@ func mockDownloader(edStore map[flow.Identifier]*testExecutionDataServiceEntry) downloader.On("Get", mock.Anything, mock.AnythingOfType("flow.Identifier")). 
Return( - func(ctx context.Context, id flow.Identifier) *model2.BlockExecutionData { + func(ctx context.Context, id flow.Identifier) *execution_data.BlockExecutionData { ed, _ := get(id) return ed }, @@ -326,7 +325,7 @@ func generateBlocksWithSomeMissed(blockCount int) map[uint64]testExecutionDataCa failures := rand.Intn(3) + 1 attempts := 0 - missing[i] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { + missing[i] = func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) { if attempts < failures*2 { // this func is run twice for every attempt by the mock (once for ExecutionData one for errors) attempts++ // This should fail the first n fetch attempts @@ -349,7 +348,7 @@ func generateBlocksWithRandomDelays(blockCount int) map[uint64]testExecutionData continue } - delays[i] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { + delays[i] = func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) { time.Sleep(time.Duration(rand.Intn(25)) * time.Millisecond) return ed, nil } @@ -365,7 +364,7 @@ func generateBlocksWithHaltingError(blockCount int) (specialBlockGenerator, erro generate := func(int) map[uint64]testExecutionDataCallback { return map[uint64]testExecutionDataCallback{ - height: func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { + height: func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) { return nil, err }, } @@ -377,7 +376,7 @@ func generatePauseResume(pauseHeight uint64) (specialBlockGenerator, func()) { pause := make(chan struct{}) blocks := map[uint64]testExecutionDataCallback{} - blocks[pauseHeight] = func(ed *model2.BlockExecutionData) (*model2.BlockExecutionData, error) { + blocks[pauseHeight] = func(ed *execution_data.BlockExecutionData) (*execution_data.BlockExecutionData, error) { <-pause return ed, nil } @@ -531,8 +530,8 @@ func (suite *ExecutionDataRequesterSuite) runRequesterTest(edr state_synchroniza return fetchedExecutionData } -func (suite *ExecutionDataRequesterSuite) consumeExecutionDataNotifications(cfg *fetchTestRun, done func(), fetchedExecutionData map[flow.Identifier]*model2.BlockExecutionData) func(ed *model2.BlockExecutionDataEntity) { - return func(ed *model2.BlockExecutionDataEntity) { +func (suite *ExecutionDataRequesterSuite) consumeExecutionDataNotifications(cfg *fetchTestRun, done func(), fetchedExecutionData map[flow.Identifier]*execution_data.BlockExecutionData) func(ed *execution_data.BlockExecutionDataEntity) { + return func(ed *execution_data.BlockExecutionDataEntity) { if _, has := fetchedExecutionData[ed.BlockID]; has { suite.T().Errorf("duplicate execution data for block %s", ed.BlockID) return @@ -569,7 +568,7 @@ func (suite *ExecutionDataRequesterSuite) finalizeBlocks(cfg *fetchTestRun, foll } } -type receivedExecutionData map[flow.Identifier]*model2.BlockExecutionData +type receivedExecutionData map[flow.Identifier]*execution_data.BlockExecutionData type fetchTestRun struct { sealedCount int startHeight uint64 @@ -579,14 +578,14 @@ type fetchTestRun struct { resultsByID map[flow.Identifier]*flow.ExecutionResult resultsByBlockID map[flow.Identifier]*flow.ExecutionResult sealsByBlockID map[flow.Identifier]*flow.Seal - executionDataByID map[flow.Identifier]*model2.BlockExecutionData + executionDataByID map[flow.Identifier]*execution_data.BlockExecutionData executionDataEntries map[flow.Identifier]*testExecutionDataServiceEntry executionDataIDByBlockID 
map[flow.Identifier]flow.Identifier expectedIrrecoverable error stopHeight uint64 resumeHeight uint64 - fetchedExecutionData map[flow.Identifier]*model2.BlockExecutionData + fetchedExecutionData map[flow.Identifier]*execution_data.BlockExecutionData waitTimeout time.Duration maxSearchAhead uint64 @@ -630,7 +629,7 @@ func (suite *ExecutionDataRequesterSuite) generateTestData(blockCount int, speci resultsByID := map[flow.Identifier]*flow.ExecutionResult{} resultsByBlockID := map[flow.Identifier]*flow.ExecutionResult{} sealsByBlockID := map[flow.Identifier]*flow.Seal{} - executionDataByID := map[flow.Identifier]*model2.BlockExecutionData{} + executionDataByID := map[flow.Identifier]*execution_data.BlockExecutionData{} executionDataIDByBlockID := map[flow.Identifier]flow.Identifier{} sealedCount := blockCount - 4 // seals for blocks 1-96 diff --git a/module/state_synchronization/requester/jobs/execution_data_reader.go b/module/state_synchronization/requester/jobs/execution_data_reader.go index 44bdcd6ca4a..bd5f7adbeae 100644 --- a/module/state_synchronization/requester/jobs/execution_data_reader.go +++ b/module/state_synchronization/requester/jobs/execution_data_reader.go @@ -7,8 +7,8 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" - "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/storage" ) @@ -17,7 +17,7 @@ import ( type BlockEntry struct { BlockID flow.Identifier Height uint64 - ExecutionData *model.BlockExecutionDataEntity + ExecutionData *execution_data.BlockExecutionDataEntity } var _ module.Jobs = (*ExecutionDataReader)(nil) diff --git a/module/state_synchronization/requester/jobs/execution_data_reader_test.go b/module/state_synchronization/requester/jobs/execution_data_reader_test.go index df8b7162bcf..90240c83dd8 100644 --- a/module/state_synchronization/requester/jobs/execution_data_reader_test.go +++ b/module/state_synchronization/requester/jobs/execution_data_reader_test.go @@ -13,9 +13,9 @@ import ( "github.com/onflow/flow-go/engine/access/state_stream" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" "github.com/onflow/flow-go/module/executiondatasync/execution_data/cache" exedatamock "github.com/onflow/flow-go/module/executiondatasync/execution_data/mock" - "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/herocache" "github.com/onflow/flow-go/module/metrics" @@ -36,7 +36,7 @@ type ExecutionDataReaderSuite struct { fetchTimeout time.Duration executionDataID flow.Identifier - executionData *model.BlockExecutionData + executionData *execution_data.BlockExecutionData block *flow.Block blocksByHeight map[uint64]*flow.Block @@ -106,9 +106,9 @@ func (suite *ExecutionDataReaderSuite) reset() { } func (suite *ExecutionDataReaderSuite) TestAtIndex() { - setExecutionDataGet := func(executionData *model.BlockExecutionData, err error) { + setExecutionDataGet := func(executionData *execution_data.BlockExecutionData, err error) { suite.downloader.On("Get", mock.Anything, suite.executionDataID).Return( - func(ctx context.Context, id flow.Identifier) *model.BlockExecutionData { + func(ctx context.Context, id flow.Identifier) 
*execution_data.BlockExecutionData { return executionData }, func(ctx context.Context, id flow.Identifier) error { @@ -139,7 +139,7 @@ func (suite *ExecutionDataReaderSuite) TestAtIndex() { ed := unittest.BlockExecutionDataFixture() setExecutionDataGet(ed, nil) - edEntity := model.NewBlockExecutionDataEntity(suite.executionDataID, ed) + edEntity := execution_data.NewBlockExecutionDataEntity(suite.executionDataID, ed) job, err := suite.reader.AtIndex(suite.block.Header.Height) require.NoError(suite.T(), err) diff --git a/network/alsp/manager/manager.go b/network/alsp/manager/manager.go index 71bda556749..f29fbc694b4 100644 --- a/network/alsp/manager/manager.go +++ b/network/alsp/manager/manager.go @@ -16,7 +16,6 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/alsp" "github.com/onflow/flow-go/network/alsp/internal" @@ -178,7 +177,7 @@ func NewMisbehaviorReportManager(cfg *MisbehaviorReportManagerConfig, consumer n store := queue.NewHeroStore( cfg.SpamReportQueueSize, lg.With().Str("component", "spam_record_queue").Logger(), - networkmetrics.ApplicationLayerSpamRecordQueueMetricsFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkType)) + metrics.ApplicationLayerSpamRecordQueueMetricsFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkType)) m.workerPool = worker.NewWorkerPoolBuilder[internal.ReportedMisbehaviorWork]( cfg.Logger, @@ -192,7 +191,7 @@ func NewMisbehaviorReportManager(cfg *MisbehaviorReportManagerConfig, consumer n m.cache = m.cacheFactory( lg, cfg.SpamRecordCacheSize, - networkmetrics.ApplicationLayerSpamRecordCacheMetricFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkType)) + metrics.ApplicationLayerSpamRecordCacheMetricFactory(cfg.HeroCacheMetricsFactory, cfg.NetworkType)) builder := component.NewComponentManagerBuilder() builder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { diff --git a/network/errors.go b/network/errors.go index fd54da920f9..5c4485324e2 100644 --- a/network/errors.go +++ b/network/errors.go @@ -6,7 +6,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) var ( diff --git a/network/internal/p2putils/utils.go b/network/internal/p2putils/utils.go index d5787e5b289..2325df8734a 100644 --- a/network/internal/p2putils/utils.go +++ b/network/internal/p2putils/utils.go @@ -14,8 +14,8 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/network/p2p/keyutils" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/unicast/protocols" - "github.com/onflow/flow-go/utils/p2plogging" ) // FlowStream returns the Flow protocol Stream in the connection if one exist, else it returns nil diff --git a/network/p2p/blob/blob_service.go b/network/p2p/blob/blob_service.go index 879afaec92c..7f8d06e56c1 100644 --- a/network/p2p/blob/blob_service.go +++ b/network/p2p/blob/blob_service.go @@ -30,8 +30,8 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" ipld "github.com/ipfs/go-ipld-format" ) diff --git a/network/p2p/cache/gossipsub_spam_records.go 
b/network/p2p/cache/gossipsub_spam_records.go index d14b13aed46..265c2befbb7 100644 --- a/network/p2p/cache/gossipsub_spam_records.go +++ b/network/p2p/cache/gossipsub_spam_records.go @@ -13,7 +13,7 @@ import ( "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" "github.com/onflow/flow-go/module/mempool/stdmap" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // GossipSubSpamRecordCache is a cache for storing the gossipsub spam records of peers. It is thread-safe. diff --git a/network/p2p/connection/connection_gater.go b/network/p2p/connection/connection_gater.go index 9f8eb9015c4..4bcfb16c9e0 100644 --- a/network/p2p/connection/connection_gater.go +++ b/network/p2p/connection/connection_gater.go @@ -12,8 +12,8 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" ) var _ p2p.ConnectionGater = (*ConnGater)(nil) diff --git a/network/p2p/connection/connection_gater_test.go b/network/p2p/connection/connection_gater_test.go index 2c5b4c04e0b..5a2c678b15c 100644 --- a/network/p2p/connection/connection_gater_test.go +++ b/network/p2p/connection/connection_gater_test.go @@ -21,9 +21,9 @@ import ( "github.com/onflow/flow-go/network/p2p/connection" mockp2p "github.com/onflow/flow-go/network/p2p/mock" p2pconfig "github.com/onflow/flow-go/network/p2p/p2pbuilder/config" + "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/unicast/stream" - "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/p2p/connection/connector.go b/network/p2p/connection/connector.go index 7ff75d91103..69fbb5d4359 100644 --- a/network/p2p/connection/connector.go +++ b/network/p2p/connection/connector.go @@ -8,8 +8,8 @@ import ( "github.com/onflow/flow-go/network/internal/p2putils" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/rand" ) diff --git a/network/p2p/connection/internal/loggerNotifiee.go b/network/p2p/connection/internal/loggerNotifiee.go index 1558a1f4c9b..ce49c6081a8 100644 --- a/network/p2p/connection/internal/loggerNotifiee.go +++ b/network/p2p/connection/internal/loggerNotifiee.go @@ -6,7 +6,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) type LoggerNotifiee struct { diff --git a/network/p2p/connection/peerManager.go b/network/p2p/connection/peerManager.go index 8105bee50de..d8e323813fd 100644 --- a/network/p2p/connection/peerManager.go +++ b/network/p2p/connection/peerManager.go @@ -12,8 +12,8 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/rand" ) diff --git a/network/p2p/dht/dht.go b/network/p2p/dht/dht.go index 1b522f7a7af..930df0e2251 100644 --- a/network/p2p/dht/dht.go +++ b/network/p2p/dht/dht.go @@ -10,7 +10,7 @@ import ( 
"github.com/rs/zerolog" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // This produces a new IPFS DHT diff --git a/network/p2p/inspector/validation/control_message_validation_inspector.go b/network/p2p/inspector/validation/control_message_validation_inspector.go index 534b7d70e1e..32f475de8d1 100644 --- a/network/p2p/inspector/validation/control_message_validation_inspector.go +++ b/network/p2p/inspector/validation/control_message_validation_inspector.go @@ -23,10 +23,10 @@ import ( "github.com/onflow/flow-go/network/p2p/inspector/internal/ratelimit" p2pmsg "github.com/onflow/flow-go/network/p2p/message" "github.com/onflow/flow-go/network/p2p/p2pconf" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/state/protocol/events" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" flowrand "github.com/onflow/flow-go/utils/rand" ) diff --git a/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go b/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go index da12aae4227..a1d55ab873c 100644 --- a/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go +++ b/network/p2p/p2pbuilder/gossipsub/gossipSubBuilder.go @@ -15,7 +15,6 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/mempool/queue" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/distributor" @@ -247,16 +246,16 @@ func defaultInspectorSuite(rpcTracker p2p.RPCControlTracking) p2p.GossipSubRpcIn inspectorCfg.GossipSubRPCMetricsInspectorConfigs.NumberOfWorkers, []queue.HeroStoreConfigOption{ queue.WithHeroStoreSizeLimit(inspectorCfg.GossipSubRPCMetricsInspectorConfigs.CacheSize), - queue.WithHeroStoreCollector(networkmetrics.GossipSubRPCMetricsObserverInspectorQueueMetricFactory(heroCacheMetricsFactory, networkType)), + queue.WithHeroStoreCollector(metrics.GossipSubRPCMetricsObserverInspectorQueueMetricFactory(heroCacheMetricsFactory, networkType)), }...) notificationDistributor := distributor.DefaultGossipSubInspectorNotificationDistributor( logger, []queue.HeroStoreConfigOption{ queue.WithHeroStoreSizeLimit(inspectorCfg.GossipSubRPCInspectorNotificationCacheSize), - queue.WithHeroStoreCollector(networkmetrics.RpcInspectorNotificationQueueMetricFactory(heroCacheMetricsFactory, networkType))}...) + queue.WithHeroStoreCollector(metrics.RpcInspectorNotificationQueueMetricFactory(heroCacheMetricsFactory, networkType))}...) 
- inspectMsgQueueCacheCollector := networkmetrics.GossipSubRPCInspectorQueueMetricFactory(heroCacheMetricsFactory, networkType) - clusterPrefixedCacheCollector := networkmetrics.GossipSubRPCInspectorClusterPrefixedCacheMetricFactory(heroCacheMetricsFactory, networkType) + inspectMsgQueueCacheCollector := metrics.GossipSubRPCInspectorQueueMetricFactory(heroCacheMetricsFactory, networkType) + clusterPrefixedCacheCollector := metrics.GossipSubRPCInspectorClusterPrefixedCacheMetricFactory(heroCacheMetricsFactory, networkType) rpcValidationInspector, err := validation.NewControlMsgValidationInspector( logger, sporkId, diff --git a/network/p2p/p2pbuilder/utils.go b/network/p2p/p2pbuilder/utils.go index becc7a024ae..ef2a2bc1ae9 100644 --- a/network/p2p/p2pbuilder/utils.go +++ b/network/p2p/p2pbuilder/utils.go @@ -10,7 +10,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) const keyResourceManagerLimit = "libp2p_resource_manager_limit" diff --git a/utils/p2plogging/internal/peerIdCache.go b/network/p2p/p2plogging/internal/peerIdCache.go similarity index 90% rename from utils/p2plogging/internal/peerIdCache.go rename to network/p2p/p2plogging/internal/peerIdCache.go index 527d2a71b7b..f5655a73756 100644 --- a/utils/p2plogging/internal/peerIdCache.go +++ b/network/p2p/p2plogging/internal/peerIdCache.go @@ -2,11 +2,13 @@ package internal import ( "github.com/libp2p/go-libp2p/core/peer" + "github.com/rs/zerolog" "github.com/onflow/flow-go/model/flow" herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" "github.com/onflow/flow-go/module/mempool/stdmap" + "github.com/onflow/flow-go/module/metrics" ) var _ flow.Entity = (*peerIdCacheEntry)(nil) @@ -19,10 +21,12 @@ func NewPeerIdCache(size uint32) *PeerIdCache { return &PeerIdCache{ peerCache: stdmap.NewBackend( stdmap.WithBackData( - herocache.NewCacheWithNoopLoggerAndMetrics( + herocache.NewCache( size, herocache.DefaultOversizeFactor, - heropool.LRUEjection))), + heropool.LRUEjection, + zerolog.Nop(), + metrics.NewNoopCollector()))), } } diff --git a/utils/p2plogging/internal/peerIdCache_test.go b/network/p2p/p2plogging/internal/peerIdCache_test.go similarity index 90% rename from utils/p2plogging/internal/peerIdCache_test.go rename to network/p2p/p2plogging/internal/peerIdCache_test.go index d85246cd6be..13279276a95 100644 --- a/utils/p2plogging/internal/peerIdCache_test.go +++ b/network/p2p/p2plogging/internal/peerIdCache_test.go @@ -5,8 +5,9 @@ import ( "github.com/stretchr/testify/assert" + "github.com/onflow/flow-go/network/internal/p2pfixtures" + "github.com/onflow/flow-go/network/p2p/p2plogging/internal" p2ptest "github.com/onflow/flow-go/network/p2p/test" - "github.com/onflow/flow-go/utils/p2plogging/internal" ) func TestNewPeerIdCache(t *testing.T) { @@ -55,9 +56,9 @@ func TestPeerIdCache_EjectionScenarios(t *testing.T) { assert.Equal(t, uint(0), cache.Size()) // add peer IDs to fill the cache - pid1 := p2ptest.PeerIdFixture(t) - pid2 := p2ptest.PeerIdFixture(t) - pid3 := p2ptest.PeerIdFixture(t) + pid1 := p2pfixtures.PeerIdFixture(t) + pid2 := p2pfixtures.PeerIdFixture(t) + pid3 := p2pfixtures.PeerIdFixture(t) cache.PeerIdString(pid1) assert.Equal(t, uint(1), cache.Size()) @@ -73,7 +74,7 @@ func TestPeerIdCache_EjectionScenarios(t *testing.T) { assert.Equal(t, uint(3), cache.Size()) // add a new peer ID - pid4 := 
p2ptest.PeerIdFixture(t) + pid4 := p2pfixtures.PeerIdFixture(t) cache.PeerIdString(pid4) assert.Equal(t, uint(3), cache.Size()) diff --git a/utils/p2plogging/logging.go b/network/p2p/p2plogging/logging.go similarity index 92% rename from utils/p2plogging/logging.go rename to network/p2p/p2plogging/logging.go index 165cb538e28..e4f2e93ad7d 100644 --- a/utils/p2plogging/logging.go +++ b/network/p2p/p2plogging/logging.go @@ -3,7 +3,7 @@ package p2plogging import ( "github.com/libp2p/go-libp2p/core/peer" - "github.com/onflow/flow-go/utils/p2plogging/internal" + "github.com/onflow/flow-go/network/p2p/p2plogging/internal" ) // peerIdCache is a global cache of peer ids, it is used to avoid expensive base58 encoding of peer ids. diff --git a/utils/p2plogging/logging_test.go b/network/p2p/p2plogging/logging_test.go similarity index 96% rename from utils/p2plogging/logging_test.go rename to network/p2p/p2plogging/logging_test.go index 4c57af84a0f..3717f226631 100644 --- a/utils/p2plogging/logging_test.go +++ b/network/p2p/p2plogging/logging_test.go @@ -6,8 +6,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" + "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" - "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/p2p/p2pnet/network.go b/network/p2p/p2pnet/network.go index 5daa297fa4b..3b280ecaeae 100644 --- a/network/p2p/p2pnet/network.go +++ b/network/p2p/p2pnet/network.go @@ -31,6 +31,7 @@ import ( "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/blob" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet/internal" "github.com/onflow/flow-go/network/p2p/ping" "github.com/onflow/flow-go/network/p2p/subscription" @@ -42,7 +43,6 @@ import ( flowpubsub "github.com/onflow/flow-go/network/validator/pubsub" _ "github.com/onflow/flow-go/utils/binstat" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" ) const ( diff --git a/network/p2p/p2pnode/gossipsubMetrics.go b/network/p2p/p2pnode/gossipsubMetrics.go index b36c6ec4fc4..4a06b7e6e7a 100644 --- a/network/p2p/p2pnode/gossipsubMetrics.go +++ b/network/p2p/p2pnode/gossipsubMetrics.go @@ -7,7 +7,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // GossipSubControlMessageMetrics is a metrics and observability wrapper component for the incoming RPCs to a diff --git a/network/p2p/p2pnode/libp2pNode.go b/network/p2p/p2pnode/libp2pNode.go index baafbc12f80..074d76b45a6 100644 --- a/network/p2p/p2pnode/libp2pNode.go +++ b/network/p2p/p2pnode/libp2pNode.go @@ -25,10 +25,10 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/internal/p2putils" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnode/internal" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" ) const ( diff --git a/network/p2p/p2pnode/libp2pNode_test.go b/network/p2p/p2pnode/libp2pNode_test.go index 9dcc350f044..ad42ec17108 100644 --- a/network/p2p/p2pnode/libp2pNode_test.go +++ b/network/p2p/p2pnode/libp2pNode_test.go @@ -25,12 +25,12 @@ 
import ( "github.com/onflow/flow-go/network/internal/p2pfixtures" "github.com/onflow/flow-go/network/internal/p2putils" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnode" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/utils" validator "github.com/onflow/flow-go/network/validator/pubsub" - "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/p2p/p2pnode/protocolPeerCache.go b/network/p2p/p2pnode/protocolPeerCache.go index a8861cd7508..125d9aa3b37 100644 --- a/network/p2p/p2pnode/protocolPeerCache.go +++ b/network/p2p/p2pnode/protocolPeerCache.go @@ -11,7 +11,7 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/rs/zerolog" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // ProtocolPeerCache store a mapping from protocol ID to peers who support that protocol diff --git a/network/p2p/scoring/registry.go b/network/p2p/scoring/registry.go index 7705ed45c86..a1174e165b9 100644 --- a/network/p2p/scoring/registry.go +++ b/network/p2p/scoring/registry.go @@ -12,8 +12,8 @@ import ( "github.com/onflow/flow-go/network/p2p" netcache "github.com/onflow/flow-go/network/p2p/cache" p2pmsg "github.com/onflow/flow-go/network/p2p/message" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" ) const ( diff --git a/network/p2p/test/topic_validator_test.go b/network/p2p/test/topic_validator_test.go index 3ea250cba6c..21fd328cf1a 100644 --- a/network/p2p/test/topic_validator_test.go +++ b/network/p2p/test/topic_validator_test.go @@ -24,13 +24,13 @@ import ( "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" "github.com/onflow/flow-go/network/p2p/translator" "github.com/onflow/flow-go/network/p2p/utils" "github.com/onflow/flow-go/network/slashing" "github.com/onflow/flow-go/network/validator" flowpubsub "github.com/onflow/flow-go/network/validator/pubsub" - "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/p2p/tracer/gossipSubMeshTracer.go b/network/p2p/tracer/gossipSubMeshTracer.go index bd3420ada5f..cbd3d18d409 100644 --- a/network/p2p/tracer/gossipSubMeshTracer.go +++ b/network/p2p/tracer/gossipSubMeshTracer.go @@ -14,12 +14,11 @@ import ( "github.com/onflow/flow-go/module/component" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/metrics/network" "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/tracer/internal" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" ) const ( @@ -81,8 +80,8 @@ func NewGossipSubMeshTracer(config *GossipSubMeshTracerConfig) *GossipSubMeshTra rpcSentTracker := internal.NewRPCSentTracker(&internal.RPCSentTrackerConfig{ Logger: lg, RPCSentCacheSize: config.RpcSentTrackerCacheSize, - RPCSentCacheCollector: networkmetrics.GossipSubRPCSentTrackerMetricFactory(config.HeroCacheMetricsFactory, 
config.NetworkingType), - WorkerQueueCacheCollector: networkmetrics.GossipSubRPCSentTrackerQueueMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType), + RPCSentCacheCollector: metrics.GossipSubRPCSentTrackerMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType), + WorkerQueueCacheCollector: metrics.GossipSubRPCSentTrackerQueueMetricFactory(config.HeroCacheMetricsFactory, config.NetworkingType), WorkerQueueCacheSize: config.RpcSentTrackerWorkerQueueCacheSize, NumOfWorkers: config.RpcSentTrackerNumOfWorkers, LastHighestIhavesSentResetInterval: defaultLastHighestIHaveRPCSizeResetInterval, diff --git a/network/p2p/tracer/gossipSubScoreTracer.go b/network/p2p/tracer/gossipSubScoreTracer.go index 1c1568f1f24..b28189ec624 100644 --- a/network/p2p/tracer/gossipSubScoreTracer.go +++ b/network/p2p/tracer/gossipSubScoreTracer.go @@ -12,8 +12,8 @@ import ( "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" ) const ( diff --git a/network/p2p/translator/identity_provider_translator.go b/network/p2p/translator/identity_provider_translator.go index 5e109e23145..8156f2e22a2 100644 --- a/network/p2p/translator/identity_provider_translator.go +++ b/network/p2p/translator/identity_provider_translator.go @@ -10,7 +10,7 @@ import ( "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" "github.com/onflow/flow-go/network/p2p/keyutils" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // IdentityProviderIDTranslator implements an `p2p.IDTranslator` which provides ID diff --git a/network/p2p/unicast/errors.go b/network/p2p/unicast/errors.go index 817560e1b8f..d8abb2624f7 100644 --- a/network/p2p/unicast/errors.go +++ b/network/p2p/unicast/errors.go @@ -6,7 +6,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // ErrDialInProgress indicates that the libp2p node is currently dialing the peer. diff --git a/network/p2p/unicast/manager.go b/network/p2p/unicast/manager.go index 895262fb59a..a03f5f1de2b 100644 --- a/network/p2p/unicast/manager.go +++ b/network/p2p/unicast/manager.go @@ -17,9 +17,9 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/unicast/protocols" "github.com/onflow/flow-go/network/p2p/unicast/stream" - "github.com/onflow/flow-go/utils/p2plogging" ) const ( diff --git a/network/p2p/unicast/stream/errors.go b/network/p2p/unicast/stream/errors.go index f7f2f187c3b..9c73294c52b 100644 --- a/network/p2p/unicast/stream/errors.go +++ b/network/p2p/unicast/stream/errors.go @@ -7,7 +7,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) // ErrSecurityProtocolNegotiationFailed indicates security protocol negotiation failed during the stream factory connect attempt. 
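Note for reviewers: the import churn above is mechanical, but the destination package matters. A minimal sketch of a post-series call site follows, assuming the package's exported helper is `PeerId` (the exact exported name is not visible in these hunks; the surrounding function is illustrative only):

    package example

    import (
    	"github.com/libp2p/go-libp2p/core/peer"
    	"github.com/rs/zerolog"

    	// the new canonical import path introduced by this series
    	"github.com/onflow/flow-go/network/p2p/p2plogging"
    )

    // logPeerDisallowed is a hypothetical helper showing the logging pattern
    // these hunks rewrite: the peer ID is rendered via p2plogging, so the
    // expensive base58 encoding is served from a cache instead of being
    // recomputed on every log call.
    func logPeerDisallowed(lg zerolog.Logger, pid peer.ID) {
    	lg.Warn().
    		Str("peer_id", p2plogging.PeerId(pid)). // assumed helper name
    		Msg("connection attempt from disallow-listed peer rejected")
    }

The design choice behind the move is that the cache is a networking-layer concern, so it now lives under `network/p2p` next to its only consumers rather than in the generic `utils` tree.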
diff --git a/network/test/unicast_authorization_test.go b/network/test/unicast_authorization_test.go index 4d25481b3d8..b9cec9ba8b0 100644 --- a/network/test/unicast_authorization_test.go +++ b/network/test/unicast_authorization_test.go @@ -23,9 +23,9 @@ import ( "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/mocknetwork" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/p2p/p2pnet" "github.com/onflow/flow-go/network/validator" - "github.com/onflow/flow-go/utils/p2plogging" "github.com/onflow/flow-go/utils/unittest" ) diff --git a/network/validator/authorized_sender_validator.go b/network/validator/authorized_sender_validator.go index 6fb2f195b34..69d925661a1 100644 --- a/network/validator/authorized_sender_validator.go +++ b/network/validator/authorized_sender_validator.go @@ -13,7 +13,7 @@ import ( "github.com/onflow/flow-go/network/codec" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/utils/p2plogging" + "github.com/onflow/flow-go/network/p2p/p2plogging" ) var ( diff --git a/network/validator/pubsub/topic_validator.go b/network/validator/pubsub/topic_validator.go index d92b84e327f..078f9272b12 100644 --- a/network/validator/pubsub/topic_validator.go +++ b/network/validator/pubsub/topic_validator.go @@ -12,10 +12,10 @@ import ( "github.com/onflow/flow-go/network/channels" "github.com/onflow/flow-go/network/message" "github.com/onflow/flow-go/network/p2p" + "github.com/onflow/flow-go/network/p2p/p2plogging" "github.com/onflow/flow-go/network/validator" _ "github.com/onflow/flow-go/utils/binstat" "github.com/onflow/flow-go/utils/logging" - "github.com/onflow/flow-go/utils/p2plogging" ) // messagePubKey extracts the public key of the envelope signer from a libp2p message. 
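The final hunks below apply the same `execution_data` package consolidation to the test fixtures. For orientation, here is a sketch of a downstream consumer once the series lands; the types and fields are taken from the hunks in this series, while the function wrapping them is illustrative:

    package example

    import (
    	"fmt"

    	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
    )

    // handleExecutionData matches the OnExecutionDataReceivedConsumer
    // signature rewritten earlier in this series: callbacks now receive
    // *execution_data.BlockExecutionDataEntity instead of the type from
    // the former execution_data/model subpackage.
    func handleExecutionData(entity *execution_data.BlockExecutionDataEntity) {
    	// BlockID and ChunkExecutionDatas are the fields exercised by the
    	// fixtures updated in the next hunk.
    	fmt.Printf("execution data for block %s (%d chunks)\n",
    		entity.BlockID, len(entity.ChunkExecutionDatas))
    }
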
diff --git a/utils/unittest/fixtures.go b/utils/unittest/fixtures.go index ab6a2770dcf..331f2ecb674 100644 --- a/utils/unittest/fixtures.go +++ b/utils/unittest/fixtures.go @@ -16,7 +16,6 @@ import ( "github.com/onflow/cadence" sdk "github.com/onflow/flow-go-sdk" - "github.com/onflow/flow-go/module/executiondatasync/execution_data/model" "github.com/onflow/flow-go/network/message" hotstuff "github.com/onflow/flow-go/consensus/hotstuff/model" @@ -2433,22 +2432,22 @@ func GetFlowProtocolEventID( return flow.HashToID(eventIDHash) } -func WithBlockExecutionDataBlockID(blockID flow.Identifier) func(*model.BlockExecutionData) { - return func(bed *model.BlockExecutionData) { +func WithBlockExecutionDataBlockID(blockID flow.Identifier) func(*execution_data.BlockExecutionData) { + return func(bed *execution_data.BlockExecutionData) { bed.BlockID = blockID } } -func WithChunkExecutionDatas(chunks ...*model.ChunkExecutionData) func(*model.BlockExecutionData) { - return func(bed *model.BlockExecutionData) { +func WithChunkExecutionDatas(chunks ...*execution_data.ChunkExecutionData) func(*execution_data.BlockExecutionData) { + return func(bed *execution_data.BlockExecutionData) { bed.ChunkExecutionDatas = chunks } } -func BlockExecutionDataFixture(opts ...func(*model.BlockExecutionData)) *model.BlockExecutionData { - bed := &model.BlockExecutionData{ +func BlockExecutionDataFixture(opts ...func(*execution_data.BlockExecutionData)) *execution_data.BlockExecutionData { + bed := &execution_data.BlockExecutionData{ BlockID: IdentifierFixture(), - ChunkExecutionDatas: []*model.ChunkExecutionData{}, + ChunkExecutionDatas: []*execution_data.ChunkExecutionData{}, } for _, opt := range opts { @@ -2458,13 +2457,13 @@ func BlockExecutionDataFixture(opts ...func(*model.BlockExecutionData)) *model.B return bed } -func BlockExecutionDatEntityFixture(opts ...func(*model.BlockExecutionData)) *model.BlockExecutionDataEntity { +func BlockExecutionDatEntityFixture(opts ...func(*execution_data.BlockExecutionData)) *execution_data.BlockExecutionDataEntity { execData := BlockExecutionDataFixture(opts...) 
- return model.NewBlockExecutionDataEntity(IdentifierFixture(), execData) + return execution_data.NewBlockExecutionDataEntity(IdentifierFixture(), execData) } -func BlockExecutionDatEntityListFixture(n int) []*model.BlockExecutionDataEntity { - l := make([]*model.BlockExecutionDataEntity, n) +func BlockExecutionDatEntityListFixture(n int) []*execution_data.BlockExecutionDataEntity { + l := make([]*execution_data.BlockExecutionDataEntity, n) for i := 0; i < n; i++ { l[i] = BlockExecutionDatEntityFixture() } @@ -2472,21 +2471,21 @@ func BlockExecutionDatEntityListFixture(n int) []*model.BlockExecutionDataEntity return l } -func WithChunkEvents(events flow.EventsList) func(*model.ChunkExecutionData) { - return func(conf *model.ChunkExecutionData) { +func WithChunkEvents(events flow.EventsList) func(*execution_data.ChunkExecutionData) { + return func(conf *execution_data.ChunkExecutionData) { conf.Events = events } } -func WithTrieUpdate(trieUpdate *ledger.TrieUpdate) func(*model.ChunkExecutionData) { - return func(conf *model.ChunkExecutionData) { +func WithTrieUpdate(trieUpdate *ledger.TrieUpdate) func(*execution_data.ChunkExecutionData) { + return func(conf *execution_data.ChunkExecutionData) { conf.TrieUpdate = trieUpdate } } -func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*model.ChunkExecutionData)) *model.ChunkExecutionData { +func ChunkExecutionDataFixture(t *testing.T, minSize int, opts ...func(*execution_data.ChunkExecutionData)) *execution_data.ChunkExecutionData { collection := CollectionFixture(5) - ced := &model.ChunkExecutionData{ + ced := &execution_data.ChunkExecutionData{ Collection: &collection, Events: flow.EventsList{}, TrieUpdate: testutils.TrieUpdateFixture(2, 1, 8), From 2b995d4baa28f762c819d4f244269f038919ea27 Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 15:31:37 -0700 Subject: [PATCH 21/25] replaces herocache with lru --- .../p2p/p2plogging/internal/peerIdCache.go | 70 +++++-------------- network/p2p/p2plogging/logging_test.go | 5 +- 2 files changed, 21 insertions(+), 54 deletions(-) diff --git a/network/p2p/p2plogging/internal/peerIdCache.go b/network/p2p/p2plogging/internal/peerIdCache.go index f5655a73756..e8df8c8722a 100644 --- a/network/p2p/p2plogging/internal/peerIdCache.go +++ b/network/p2p/p2plogging/internal/peerIdCache.go @@ -1,77 +1,45 @@ package internal import ( - "github.com/libp2p/go-libp2p/core/peer" - "github.com/rs/zerolog" + "fmt" - "github.com/onflow/flow-go/model/flow" - herocache "github.com/onflow/flow-go/module/mempool/herocache/backdata" - "github.com/onflow/flow-go/module/mempool/herocache/backdata/heropool" - "github.com/onflow/flow-go/module/mempool/stdmap" - "github.com/onflow/flow-go/module/metrics" + lru "github.com/hashicorp/golang-lru" + "github.com/libp2p/go-libp2p/core/peer" ) -var _ flow.Entity = (*peerIdCacheEntry)(nil) - type PeerIdCache struct { - peerCache *stdmap.Backend + peerCache *lru.Cache } func NewPeerIdCache(size uint32) *PeerIdCache { + c, err := lru.New(int(size)) + if err != nil { + panic(fmt.Sprintf("failed to create lru cache for peer ids: %v", err)) + } return &PeerIdCache{ - peerCache: stdmap.NewBackend( - stdmap.WithBackData( - herocache.NewCache( - size, - herocache.DefaultOversizeFactor, - heropool.LRUEjection, - zerolog.Nop(), - metrics.NewNoopCollector()))), + peerCache: c, } } func (p *PeerIdCache) PeerIdString(pid peer.ID) string { - id := flow.MakeIDFromFingerPrint([]byte(pid)) - pidEntity, ok := p.peerCache.ByID(id) + pidStr, ok := p.peerCache.Get(pid) if 
ok { - // return the cached peer id string - return pidEntity.(peerIdCacheEntry).Str - } - pidEntity = peerIdCacheEntry{ - id: id, - Pid: pid, - Str: pid.String(), + return pidStr.(string) } - p.peerCache.Add(pidEntity) - - return pidEntity.(peerIdCacheEntry).Str + pidStr0 := pid.String() + p.peerCache.Add(pid, pidStr0) + return pidStr0 } -func (p *PeerIdCache) Size() uint { - return p.peerCache.Size() +func (p *PeerIdCache) Size() int { + return p.peerCache.Len() } func (p *PeerIdCache) ByPeerId(pid peer.ID) (peer.ID, bool) { - id := flow.MakeIDFromFingerPrint([]byte(pid)) - pidEntity, ok := p.peerCache.ByID(id) + pidStr, ok := p.peerCache.Get(pid) if ok { - // return the cached peer id - return pidEntity.(peerIdCacheEntry).Pid, true + return pidStr.(peer.ID), ok } - return "", false -} - -type peerIdCacheEntry struct { - id flow.Identifier // cache the id for fast lookup - Pid peer.ID // peer id - Str string // base58 encoded peer id string -} - -func (p peerIdCacheEntry) ID() flow.Identifier { - return p.id -} - -func (p peerIdCacheEntry) Checksum() flow.Identifier { - return p.id + return "", ok } diff --git a/network/p2p/p2plogging/logging_test.go b/network/p2p/p2plogging/logging_test.go index 3717f226631..e0f7f322990 100644 --- a/network/p2p/p2plogging/logging_test.go +++ b/network/p2p/p2plogging/logging_test.go @@ -8,7 +8,6 @@ import ( "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" - "github.com/onflow/flow-go/utils/unittest" ) // TestPeerIdLogging checks the end-to-end functionality of the PeerId logger helper. @@ -21,7 +20,7 @@ func TestPeerIdLogging(t *testing.T) { // BenchmarkPeerIdString benchmarks the peer.ID.String() method. func BenchmarkPeerIdString(b *testing.B) { - unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id string benchmarking, set environment variable to enable") + // unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id string benchmarking, set environment variable to enable") count := 100 pids := make([]peer.ID, 0, count) @@ -38,7 +37,7 @@ func BenchmarkPeerIdString(b *testing.B) { // BenchmarkPeerIdLogging benchmarks the PeerId logger helper, which is expected to be faster than the peer.ID.String() method, // as it caches the base58 encoded peer ID strings. 
func BenchmarkPeerIdLogging(b *testing.B) { - unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id logging benchmarking, set environment variable to enable") + // unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id logging benchmarking, set environment variable to enable") count := 100 pids := make([]peer.ID, 0, count) From c364f54899d8d650ee504e940f9f0b3a6f80a0bb Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 15:43:32 -0700 Subject: [PATCH 22/25] fixes tests --- .../p2p/p2plogging/internal/peerIdCache.go | 10 ++-- .../p2plogging/internal/peerIdCache_test.go | 46 +++++++++---------- network/p2p/p2plogging/logging_test.go | 5 +- 3 files changed, 31 insertions(+), 30 deletions(-) diff --git a/network/p2p/p2plogging/internal/peerIdCache.go b/network/p2p/p2plogging/internal/peerIdCache.go index e8df8c8722a..91893f400d3 100644 --- a/network/p2p/p2plogging/internal/peerIdCache.go +++ b/network/p2p/p2plogging/internal/peerIdCache.go @@ -11,8 +11,8 @@ type PeerIdCache struct { peerCache *lru.Cache } -func NewPeerIdCache(size uint32) *PeerIdCache { - c, err := lru.New(int(size)) +func NewPeerIdCache(size int) *PeerIdCache { + c, err := lru.New(size) if err != nil { panic(fmt.Sprintf("failed to create lru cache for peer ids: %v", err)) } @@ -36,10 +36,10 @@ func (p *PeerIdCache) Size() int { return p.peerCache.Len() } -func (p *PeerIdCache) ByPeerId(pid peer.ID) (peer.ID, bool) { +func (p *PeerIdCache) ByPeerId(pid peer.ID) (string, bool) { pidStr, ok := p.peerCache.Get(pid) if ok { - return pidStr.(peer.ID), ok + return pidStr.(string), true } - return "", ok + return "", false } diff --git a/network/p2p/p2plogging/internal/peerIdCache_test.go b/network/p2p/p2plogging/internal/peerIdCache_test.go index 13279276a95..ab0158e4641 100644 --- a/network/p2p/p2plogging/internal/peerIdCache_test.go +++ b/network/p2p/p2plogging/internal/peerIdCache_test.go @@ -11,13 +11,13 @@ import ( ) func TestNewPeerIdCache(t *testing.T) { - cacheSize := uint32(100) + cacheSize := 100 cache := internal.NewPeerIdCache(cacheSize) assert.NotNil(t, cache) } func TestPeerIdCache_PeerIdString(t *testing.T) { - cacheSize := uint32(100) + cacheSize := 100 cache := internal.NewPeerIdCache(cacheSize) t.Run("existing peer ID", func(t *testing.T) { @@ -26,9 +26,9 @@ func TestPeerIdCache_PeerIdString(t *testing.T) { assert.NotEmpty(t, pidStr) assert.Equal(t, pid.String(), pidStr) - gotPid, ok := cache.ByPeerId(pid) + gotPidStr, ok := cache.ByPeerId(pid) assert.True(t, ok, "expected pid to be in the cache") - assert.Equal(t, pid.String(), gotPid.String()) + assert.Equal(t, pid.String(), gotPidStr) }) t.Run("non-existing peer ID", func(t *testing.T) { @@ -40,20 +40,20 @@ func TestPeerIdCache_PeerIdString(t *testing.T) { assert.NotEmpty(t, pidStr) assert.Equal(t, pid2.String(), pidStr) - gotPid, ok := cache.ByPeerId(pid2) + gotPidStr, ok := cache.ByPeerId(pid2) assert.True(t, ok, "expected pid to be in the cache") - assert.Equal(t, pid2.String(), gotPid.String()) + assert.Equal(t, pid2.String(), gotPidStr) - gotPid, ok = cache.ByPeerId(pid1) + gotPidStr, ok = cache.ByPeerId(pid1) assert.True(t, ok, "expected pid to be in the cache") - assert.Equal(t, pid1.String(), gotPid.String()) + assert.Equal(t, pid1.String(), gotPidStr) }) } func TestPeerIdCache_EjectionScenarios(t *testing.T) { - cacheSize := uint32(3) + cacheSize := 3 cache := internal.NewPeerIdCache(cacheSize) - assert.Equal(t, uint(0), cache.Size()) + assert.Equal(t, 0, cache.Size()) // add peer IDs to 
fill the cache pid1 := p2pfixtures.PeerIdFixture(t) @@ -61,38 +61,38 @@ func TestPeerIdCache_EjectionScenarios(t *testing.T) { pid3 := p2pfixtures.PeerIdFixture(t) cache.PeerIdString(pid1) - assert.Equal(t, uint(1), cache.Size()) + assert.Equal(t, 1, cache.Size()) cache.PeerIdString(pid2) - assert.Equal(t, uint(2), cache.Size()) + assert.Equal(t, 2, cache.Size()) cache.PeerIdString(pid3) - assert.Equal(t, uint(3), cache.Size()) + assert.Equal(t, 3, cache.Size()) // check that all peer IDs are in the cache assert.Equal(t, pid1.String(), cache.PeerIdString(pid1)) assert.Equal(t, pid2.String(), cache.PeerIdString(pid2)) assert.Equal(t, pid3.String(), cache.PeerIdString(pid3)) - assert.Equal(t, uint(3), cache.Size()) + assert.Equal(t, 3, cache.Size()) // add a new peer ID pid4 := p2pfixtures.PeerIdFixture(t) cache.PeerIdString(pid4) - assert.Equal(t, uint(3), cache.Size()) + assert.Equal(t, 3, cache.Size()) // check that pid1 is now the one that has been evicted - gotId1, ok := cache.ByPeerId(pid1) + gotId1Str, ok := cache.ByPeerId(pid1) assert.False(t, ok, "expected pid1 to be evicted") - assert.Equal(t, "", gotId1.String()) + assert.Equal(t, "", gotId1Str) // confirm other peer IDs are still in the cache - gotId2, ok := cache.ByPeerId(pid2) + gotId2Str, ok := cache.ByPeerId(pid2) assert.True(t, ok, "expected pid2 to be in the cache") - assert.Equal(t, pid2.String(), gotId2.String()) + assert.Equal(t, pid2.String(), gotId2Str) - gotId3, ok := cache.ByPeerId(pid3) + gotId3Str, ok := cache.ByPeerId(pid3) assert.True(t, ok, "expected pid3 to be in the cache") - assert.Equal(t, pid3.String(), gotId3.String()) + assert.Equal(t, pid3.String(), gotId3Str) - gotId4, ok := cache.ByPeerId(pid4) + gotId4Str, ok := cache.ByPeerId(pid4) assert.True(t, ok, "expected pid4 to be in the cache") - assert.Equal(t, pid4.String(), gotId4.String()) + assert.Equal(t, pid4.String(), gotId4Str) } diff --git a/network/p2p/p2plogging/logging_test.go b/network/p2p/p2plogging/logging_test.go index e0f7f322990..3717f226631 100644 --- a/network/p2p/p2plogging/logging_test.go +++ b/network/p2p/p2plogging/logging_test.go @@ -8,6 +8,7 @@ import ( "github.com/onflow/flow-go/network/p2p/p2plogging" p2ptest "github.com/onflow/flow-go/network/p2p/test" + "github.com/onflow/flow-go/utils/unittest" ) // TestPeerIdLogging checks the end-to-end functionality of the PeerId logger helper. @@ -20,7 +21,7 @@ func TestPeerIdLogging(t *testing.T) { // BenchmarkPeerIdString benchmarks the peer.ID.String() method. func BenchmarkPeerIdString(b *testing.B) { - // unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id string benchmarking, set environment variable to enable") + unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id string benchmarking, set environment variable to enable") count := 100 pids := make([]peer.ID, 0, count) @@ -37,7 +38,7 @@ func BenchmarkPeerIdString(b *testing.B) { // BenchmarkPeerIdLogging benchmarks the PeerId logger helper, which is expected to be faster than the peer.ID.String() method, // as it caches the base58 encoded peer ID strings. 
func BenchmarkPeerIdLogging(b *testing.B) { - // unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id logging benchmarking, set environment variable to enable") + unittest.SkipBenchmarkUnless(b, unittest.BENCHMARK_EXPERIMENT, "skips peer id logging benchmarking, set environment variable to enable") count := 100 pids := make([]peer.ID, 0, count) From 1a5b376f8a9c3df8e5ef4bbb577a8689cb400953 Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 15:47:34 -0700 Subject: [PATCH 23/25] adds a todo --- network/p2p/p2plogging/internal/peerIdCache.go | 11 +++++++++++ network/p2p/p2plogging/internal/peerIdCache_test.go | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/network/p2p/p2plogging/internal/peerIdCache.go b/network/p2p/p2plogging/internal/peerIdCache.go index 91893f400d3..037c42437c9 100644 --- a/network/p2p/p2plogging/internal/peerIdCache.go +++ b/network/p2p/p2plogging/internal/peerIdCache.go @@ -8,6 +8,11 @@ import ( ) type PeerIdCache struct { + // TODO: Note that we use lru.Cache as there is an inherent import cycle when using the HeroCache. + // Moving forward we should consider moving the HeroCache to a separate repository and transition + // to using it here. + // This PeerIdCache is using extensively across the codebase, so any minor import cycle will cause + // a lot of trouble. peerCache *lru.Cache } @@ -21,6 +26,9 @@ func NewPeerIdCache(size int) *PeerIdCache { } } +// PeerIdString returns the base58 encoded peer id string, it looks up the peer id in a cache to avoid +// expensive base58 encoding, and caches the result for future use in case of a cache miss. +// It is safe to call this method concurrently. func (p *PeerIdCache) PeerIdString(pid peer.ID) string { pidStr, ok := p.peerCache.Get(pid) if ok { @@ -32,10 +40,13 @@ func (p *PeerIdCache) PeerIdString(pid peer.ID) string { return pidStr0 } +// Size returns the number of entries in the cache; it is mainly used for testing. func (p *PeerIdCache) Size() int { return p.peerCache.Len() } +// ByPeerId returns the base58 encoded peer id string by directly looking up the peer id in the cache. It is only +// used for testing and since this is an internal package, it is not exposed to the outside world. func (p *PeerIdCache) ByPeerId(pid peer.ID) (string, bool) { pidStr, ok := p.peerCache.Get(pid) if ok { diff --git a/network/p2p/p2plogging/internal/peerIdCache_test.go b/network/p2p/p2plogging/internal/peerIdCache_test.go index ab0158e4641..dae444cf78b 100644 --- a/network/p2p/p2plogging/internal/peerIdCache_test.go +++ b/network/p2p/p2plogging/internal/peerIdCache_test.go @@ -10,12 +10,16 @@ import ( p2ptest "github.com/onflow/flow-go/network/p2p/test" ) +// TestNewPeerIdCache tests the basic functionality of the peer ID cache. It ensures that the cache +// is created successfully. func TestNewPeerIdCache(t *testing.T) { cacheSize := 100 cache := internal.NewPeerIdCache(cacheSize) assert.NotNil(t, cache) } +// TestPeerIdCache_PeerIdString tests the basic functionality of the peer ID cache. It ensures that the cache +// returns the same string as the peer.ID.String() method. func TestPeerIdCache_PeerIdString(t *testing.T) { cacheSize := 100 cache := internal.NewPeerIdCache(cacheSize) @@ -50,6 +54,8 @@ func TestPeerIdCache_PeerIdString(t *testing.T) { }) } +// TestPeerIdCache_EjectionScenarios tests the eviction logic of the peer ID cache. It ensures that the cache +// evicts the least recently added peer ID when the cache is full. 
func TestPeerIdCache_EjectionScenarios(t *testing.T) { cacheSize := 3 cache := internal.NewPeerIdCache(cacheSize) From 661f278e3d532a821f655f43e447d8682085692c Mon Sep 17 00:00:00 2001 From: "Yahya Hassanzadeh, Ph.D" Date: Mon, 11 Sep 2023 16:36:59 -0700 Subject: [PATCH 24/25] Update network/p2p/p2plogging/internal/peerIdCache.go Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- network/p2p/p2plogging/internal/peerIdCache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/p2p/p2plogging/internal/peerIdCache.go b/network/p2p/p2plogging/internal/peerIdCache.go index 037c42437c9..00f42ad7f2e 100644 --- a/network/p2p/p2plogging/internal/peerIdCache.go +++ b/network/p2p/p2plogging/internal/peerIdCache.go @@ -11,7 +11,7 @@ type PeerIdCache struct { // TODO: Note that we use lru.Cache as there is an inherent import cycle when using the HeroCache. // Moving forward we should consider moving the HeroCache to a separate repository and transition // to using it here. - // This PeerIdCache is using extensively across the codebase, so any minor import cycle will cause + // This PeerIdCache is used extensively across the codebase, so any minor import cycle will cause // a lot of trouble. peerCache *lru.Cache } From 20c50ca29cbf581e458966ab4343260b3fefc7aa Mon Sep 17 00:00:00 2001 From: Yahya Hassanzadeh Date: Mon, 11 Sep 2023 16:39:18 -0700 Subject: [PATCH 25/25] returns an error for initializing cache --- network/p2p/p2plogging/internal/peerIdCache.go | 6 +++--- network/p2p/p2plogging/internal/peerIdCache_test.go | 9 ++++++--- network/p2p/p2plogging/logging.go | 6 +++++- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/network/p2p/p2plogging/internal/peerIdCache.go b/network/p2p/p2plogging/internal/peerIdCache.go index 037c42437c9..edb6b9eec68 100644 --- a/network/p2p/p2plogging/internal/peerIdCache.go +++ b/network/p2p/p2plogging/internal/peerIdCache.go @@ -16,14 +16,14 @@ type PeerIdCache struct { peerCache *lru.Cache } -func NewPeerIdCache(size int) *PeerIdCache { +func NewPeerIdCache(size int) (*PeerIdCache, error) { c, err := lru.New(size) if err != nil { - panic(fmt.Sprintf("failed to create lru cache for peer ids: %v", err)) + return nil, fmt.Errorf("failed to create peer id cache: %w", err) } return &PeerIdCache{ peerCache: c, - } + }, nil } // PeerIdString returns the base58 encoded peer id string, it looks up the peer id in a cache to avoid diff --git a/network/p2p/p2plogging/internal/peerIdCache_test.go b/network/p2p/p2plogging/internal/peerIdCache_test.go index dae444cf78b..6d9dcfc01f9 100644 --- a/network/p2p/p2plogging/internal/peerIdCache_test.go +++ b/network/p2p/p2plogging/internal/peerIdCache_test.go @@ -14,7 +14,8 @@ import ( // is created successfully. func TestNewPeerIdCache(t *testing.T) { cacheSize := 100 - cache := internal.NewPeerIdCache(cacheSize) + cache, err := internal.NewPeerIdCache(cacheSize) + assert.NoError(t, err) assert.NotNil(t, cache) } @@ -22,7 +23,8 @@ func TestNewPeerIdCache(t *testing.T) { // returns the same string as the peer.ID.String() method. func TestPeerIdCache_PeerIdString(t *testing.T) { cacheSize := 100 - cache := internal.NewPeerIdCache(cacheSize) + cache, err := internal.NewPeerIdCache(cacheSize) + assert.NoError(t, err) t.Run("existing peer ID", func(t *testing.T) { pid := p2ptest.PeerIdFixture(t) @@ -58,7 +60,8 @@ func TestPeerIdCache_PeerIdString(t *testing.T) { // evicts the least recently added peer ID when the cache is full. 
func TestPeerIdCache_EjectionScenarios(t *testing.T) { cacheSize := 3 - cache := internal.NewPeerIdCache(cacheSize) + cache, err := internal.NewPeerIdCache(cacheSize) + assert.NoError(t, err) assert.Equal(t, 0, cache.Size()) // add peer IDs to fill the cache diff --git a/network/p2p/p2plogging/logging.go b/network/p2p/p2plogging/logging.go index e4f2e93ad7d..324e7677dd3 100644 --- a/network/p2p/p2plogging/logging.go +++ b/network/p2p/p2plogging/logging.go @@ -13,7 +13,11 @@ var peerIdCache *internal.PeerIdCache // the peer id cache before any other code is run, so that the cache is ready // to use. func init() { - peerIdCache = internal.NewPeerIdCache(10_000) + cache, err := internal.NewPeerIdCache(10_000) + if err != nil { + panic(err) + } + peerIdCache = cache } // PeerId is a logger helper that returns the base58 encoded peer id string, it looks up the peer id in a cache to avoid
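For readers who want the final shape of patches 21 through 25 in one place, here is a self-contained sketch of the LRU memoization pattern they converge on: an error-returning constructor, compute-on-miss lookups, and a bounded cache size. The hashicorp/golang-lru dependency is the one introduced above; all other names and the demo main are illustrative.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

// stringCache memoizes an expensive conversion behind a bounded LRU cache,
// mirroring PeerIdCache: the constructor returns an error rather than
// panicking (patch 25), and a miss computes the value and caches it.
type stringCache struct {
	cache *lru.Cache
}

func newStringCache(size int) (*stringCache, error) {
	c, err := lru.New(size)
	if err != nil {
		return nil, fmt.Errorf("failed to create cache: %w", err)
	}
	return &stringCache{cache: c}, nil
}

func (s *stringCache) get(key int, compute func(int) string) string {
	if v, ok := s.cache.Get(key); ok {
		return v.(string) // cache hit: skip the expensive conversion
	}
	v := compute(key)
	s.cache.Add(key, v) // cache miss: compute once, remember for next time
	return v
}

func main() {
	sc, err := newStringCache(2)
	if err != nil {
		panic(err)
	}
	expensive := func(k int) string { return fmt.Sprintf("value-%d", k) }
	fmt.Println(sc.get(1, expensive)) // miss, computes
	fmt.Println(sc.get(1, expensive)) // hit, cached
	fmt.Println(sc.get(2, expensive)) // miss
	fmt.Println(sc.get(3, expensive)) // miss; evicts key 1 (size 2, LRU)
}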