uses MAX_LEADER_SCHEDULE_STAKES for cluster-nodes cache capacity #2151

Merged
6 changes: 4 additions & 2 deletions turbine/src/broadcast_stage.rs
@@ -20,7 +20,7 @@ use {
solana_measure::measure::Measure,
solana_metrics::{inc_new_counter_error, inc_new_counter_info},
solana_poh::poh_recorder::WorkingBankEntry,
-    solana_runtime::bank_forks::BankForks,
+    solana_runtime::{bank::MAX_LEADER_SCHEDULE_STAKES, bank_forks::BankForks},
solana_sdk::{
clock::Slot,
pubkey::Pubkey,
@@ -31,6 +31,7 @@ use {
sendmmsg::{batch_send, SendPktsError},
socket::SocketAddrSpace,
},
+    static_assertions::const_assert_eq,
std::{
collections::{HashMap, HashSet},
net::{SocketAddr, UdpSocket},
@@ -52,7 +53,8 @@ pub(crate) mod broadcast_utils;
mod fail_entry_verification_broadcast_run;
mod standard_broadcast_run;

-const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 8;
+const_assert_eq!(CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, 5);

Why the assert? Are you worried about max leader schedule stakes adjusting up? If so, maybe we should min this with some constant cap?

Author:
Just as an explicit signal that changing that constant has ramifications here.
It also serves as documentation of what the capacity actually is. (A standalone sketch of this assertion pattern follows the diff below.)

+const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = MAX_LEADER_SCHEDULE_STAKES as usize;
const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5);

pub(crate) type RecordReceiver = Receiver<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>;
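To make the review discussion above concrete, here is a minimal, standalone sketch of the `const_assert_eq!` guard from the `static_assertions` crate. In the PR, `MAX_LEADER_SCHEDULE_STAKES` is imported from `solana_runtime::bank`; the local constant below is only a stand-in so the snippet compiles on its own.

```rust
// Standalone sketch of the compile-time guard discussed above. If
// MAX_LEADER_SCHEDULE_STAKES ever changes, const_assert_eq! fails to compile,
// forcing whoever changes it to revisit the cluster-nodes cache capacity.
use static_assertions::const_assert_eq;

// Stand-in for solana_runtime::bank::MAX_LEADER_SCHEDULE_STAKES (5 at the time
// of this PR); the real code imports it instead of redefining it.
const MAX_LEADER_SCHEDULE_STAKES: u64 = 5;

// Capacity is tied to the runtime constant instead of a hard-coded 8.
const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = MAX_LEADER_SCHEDULE_STAKES as usize;

// Documents the effective capacity and breaks the build if it silently drifts.
const_assert_eq!(CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, 5);

fn main() {
    println!("cluster-nodes cache capacity = {CLUSTER_NODES_CACHE_NUM_EPOCH_CAP}");
}
```

The assertion adds no runtime cost; it only turns a silent change of `MAX_LEADER_SCHEDULE_STAKES` into a compile error at the places that depend on it.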
9 changes: 7 additions & 2 deletions turbine/src/retransmit_stage.rs
@@ -19,12 +19,16 @@ use {
solana_rayon_threadlimit::get_thread_count,
solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions},
solana_rpc_client_api::response::SlotUpdate,
-    solana_runtime::{bank::Bank, bank_forks::BankForks},
+    solana_runtime::{
+        bank::{Bank, MAX_LEADER_SCHEDULE_STAKES},
+        bank_forks::BankForks,
+    },
solana_sdk::{clock::Slot, pubkey::Pubkey, timing::timestamp},
solana_streamer::{
sendmmsg::{multi_target_send, SendPktsError},
socket::SocketAddrSpace,
},
+    static_assertions::const_assert_eq,
std::{
collections::HashMap,
iter::repeat,
@@ -47,7 +51,8 @@ const DEDUPER_RESET_CYCLE: Duration = Duration::from_secs(5 * 60);
// Minimum number of shreds to use rayon parallel iterators.
const PAR_ITER_MIN_NUM_SHREDS: usize = 2;

-const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 8;
+const_assert_eq!(CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, 5);
+const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = MAX_LEADER_SCHEDULE_STAKES as usize;
const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5);

#[derive(Default)]
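For illustration only, here is a rough sketch of how an epoch-keyed cache bounded by `CLUSTER_NODES_CACHE_NUM_EPOCH_CAP` and refreshed after `CLUSTER_NODES_CACHE_TTL` could behave. This is not the actual `ClusterNodesCache` implementation; the `EpochCache` type, its `get_or_insert_with` method, and the oldest-entry eviction policy are assumptions made up for the sketch.

```rust
use std::{
    collections::HashMap,
    time::{Duration, Instant},
};

type Epoch = u64;

// Mirrors the constants from the diff; 5 == MAX_LEADER_SCHEDULE_STAKES.
const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 5;
const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5);

struct CachedEntry<T> {
    value: T,
    inserted: Instant,
}

// Hypothetical epoch-keyed cache; not the actual ClusterNodesCache.
struct EpochCache<T> {
    entries: HashMap<Epoch, CachedEntry<T>>,
}

impl<T: Clone> EpochCache<T> {
    fn new() -> Self {
        Self {
            entries: HashMap::new(),
        }
    }

    // Returns the cached value for `epoch`, recomputing it with `make` when the
    // entry is missing or older than the TTL, and evicting the oldest epoch when
    // the capacity cap would otherwise be exceeded.
    fn get_or_insert_with(&mut self, epoch: Epoch, make: impl FnOnce() -> T) -> T {
        let now = Instant::now();
        let stale = self.entries.get(&epoch).map_or(true, |entry| {
            now.duration_since(entry.inserted) > CLUSTER_NODES_CACHE_TTL
        });
        if stale {
            if !self.entries.contains_key(&epoch)
                && self.entries.len() >= CLUSTER_NODES_CACHE_NUM_EPOCH_CAP
            {
                // Evict the oldest cached epoch to stay within the capacity cap.
                let oldest = self
                    .entries
                    .iter()
                    .min_by_key(|(_, entry)| entry.inserted)
                    .map(|(&epoch, _)| epoch);
                if let Some(oldest) = oldest {
                    self.entries.remove(&oldest);
                }
            }
            self.entries.insert(
                epoch,
                CachedEntry {
                    value: make(),
                    inserted: now,
                },
            );
        }
        self.entries[&epoch].value.clone()
    }
}

fn main() {
    let mut cache: EpochCache<String> = EpochCache::new();
    for epoch in 0..7 {
        let nodes = cache.get_or_insert_with(epoch, || format!("cluster nodes for epoch {epoch}"));
        println!("{nodes}");
    }
    // Only the CLUSTER_NODES_CACHE_NUM_EPOCH_CAP most recent epochs are retained.
    assert!(cache.entries.len() <= CLUSTER_NODES_CACHE_NUM_EPOCH_CAP);
}
```

The point of the change itself is narrower: the cache capacity in both broadcast and retransmit stages is now derived from `MAX_LEADER_SCHEDULE_STAKES` (the number of epochs for which leader-schedule stakes are kept) instead of the previous hard-coded 8.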