Merge branch 'unstable' into spellcheck
chong-he authored Dec 10, 2024
2 parents 367f2b1 + c5a48a9 commit 0882e27
Showing 58 changed files with 183 additions and 141 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/local-testnet.yml
@@ -40,7 +40,7 @@ jobs:
run: |
echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
sudo apt update
sudo apt install -y kurtosis-cli=1.3.1
sudo apt install -y kurtosis-cli
kurtosis analytics disable
- name: Download Docker image artifact
@@ -86,7 +86,7 @@ jobs:
run: |
echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
sudo apt update
sudo apt install -y kurtosis-cli=1.3.1
sudo apt install -y kurtosis-cli
kurtosis analytics disable
- name: Download Docker image artifact
@@ -121,7 +121,7 @@ jobs:
run: |
echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
sudo apt update
sudo apt install -y kurtosis-cli=1.3.1
sudo apt install -y kurtosis-cli
kurtosis analytics disable
- name: Download Docker image artifact
5 changes: 5 additions & 0 deletions .github/workflows/test-suite.yml
@@ -83,6 +83,11 @@ jobs:
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }}
steps:
- uses: actions/checkout@v4
# Set Java version to 21. (required since Web3Signer 24.12.0).
- uses: actions/setup-java@v4
with:
distribution: 'temurin'
java-version: '21'
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == 'false'
uses: moonrepo/setup-rust@v1
44 changes: 27 additions & 17 deletions Cargo.lock

Some generated files are not rendered by default.

9 changes: 8 additions & 1 deletion FUNDING.json
@@ -2,6 +2,13 @@
"drips": {
"ethereum": {
"ownedBy": "0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b"
},
"filecoin": {
"ownedBy": "0x25c4a76E7d118705e7Ea2e9b7d8C59930d8aCD3b"
}
},
"opRetro": {
"projectId": "0x04b1cd5a7c59117474ce414b309fa48e985bdaab4b0dab72045f74d04ebd8cff"
}
}
}

2 changes: 1 addition & 1 deletion Makefile
@@ -240,7 +240,7 @@ install-audit:
cargo install --force cargo-audit

audit-CI:
cargo audit
cargo audit --ignore RUSTSEC-2024-0421

# Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose.
vendor:
2 changes: 1 addition & 1 deletion beacon_node/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "beacon_node"
version = "5.3.0"
version = "6.0.0"
authors = [
"Paul Hauner <[email protected]>",
"Age Manning <[email protected]",
10 changes: 5 additions & 5 deletions beacon_node/beacon_chain/src/attestation_verification.rs
@@ -306,7 +306,7 @@ pub struct VerifiedAggregatedAttestation<'a, T: BeaconChainTypes> {
indexed_attestation: IndexedAttestation<T::EthSpec>,
}

impl<'a, T: BeaconChainTypes> VerifiedAggregatedAttestation<'a, T> {
impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<'_, T> {
pub fn into_indexed_attestation(self) -> IndexedAttestation<T::EthSpec> {
self.indexed_attestation
}
@@ -319,15 +319,15 @@ pub struct VerifiedUnaggregatedAttestation<'a, T: BeaconChainTypes> {
subnet_id: SubnetId,
}

impl<'a, T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'a, T> {
impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<'_, T> {
pub fn into_indexed_attestation(self) -> IndexedAttestation<T::EthSpec> {
self.indexed_attestation
}
}

/// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive
/// macro.
impl<'a, T: BeaconChainTypes> Clone for IndexedUnaggregatedAttestation<'a, T> {
impl<T: BeaconChainTypes> Clone for IndexedUnaggregatedAttestation<'_, T> {
fn clone(&self) -> Self {
Self {
attestation: self.attestation,
@@ -353,7 +353,7 @@ pub trait VerifiedAttestation<T: BeaconChainTypes>: Sized {
}
}

impl<'a, T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedAggregatedAttestation<'a, T> {
impl<T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedAggregatedAttestation<'_, T> {
fn attestation(&self) -> AttestationRef<T::EthSpec> {
self.attestation()
}
@@ -363,7 +363,7 @@ impl<'a, T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedAggregatedAttes
}
}

impl<'a, T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedUnaggregatedAttestation<'a, T> {
impl<T: BeaconChainTypes> VerifiedAttestation<T> for VerifiedUnaggregatedAttestation<'_, T> {
fn attestation(&self) -> AttestationRef<T::EthSpec> {
self.attestation
}
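These hunks are mechanical: each impl header drops a lifetime parameter that is only named once, writing `'_` instead, and the hand-written `Clone` impl is kept so that no `T: Clone` bound is imposed, as its doc comment explains. A minimal sketch of why the manual impl matters, using hypothetical stand-in types rather than the real attestation structs:

```rust
use std::marker::PhantomData;

/// Stand-in for a verified-attestation wrapper: it only borrows its data,
/// so cloning never needs `T` itself to be `Clone`.
struct Verified<'a, T> {
    attestation: &'a str, // placeholder for the borrowed attestation
    _types: PhantomData<T>,
}

// `#[derive(Clone)]` would generate `impl<'a, T: Clone> Clone for Verified<'a, T>`,
// forcing callers to prove `T: Clone`. The manual impl (written with the elided
// `'_` lifetime, matching the diff above) only copies the reference.
impl<T> Clone for Verified<'_, T> {
    fn clone(&self) -> Self {
        Self {
            attestation: self.attestation,
            _types: PhantomData,
        }
    }
}

struct ChainTypes; // deliberately not `Clone`

fn main() {
    let v: Verified<'_, ChainTypes> = Verified {
        attestation: "aggregate",
        _types: PhantomData,
    };
    let copy = v.clone(); // fine even though `ChainTypes` is not `Clone`
    assert_eq!(copy.attestation, "aggregate");
}
```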
2 changes: 2 additions & 0 deletions beacon_node/beacon_chain/src/beacon_chain.rs
@@ -1112,6 +1112,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// ## Errors
///
/// May return a database error.
#[allow(clippy::type_complexity)]
pub fn get_blocks_checking_caches(
self: &Arc<Self>,
block_roots: Vec<Hash256>,
@@ -1127,6 +1128,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(BeaconBlockStreamer::<T>::new(self, CheckCaches::Yes)?.launch_stream(block_roots))
}

#[allow(clippy::type_complexity)]
pub fn get_blocks(
self: &Arc<Self>,
block_roots: Vec<Hash256>,
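The only change here is the `#[allow(clippy::type_complexity)]` attribute: the nested return type of these block-streaming methods trips that lint, and the diff silences it locally rather than restructuring the signature. A small sketch of the two usual responses, using a placeholder return type instead of the real streamer types:

```rust
// Option 1: keep the inline type and allow the lint on that item, as the diff does.
#[allow(clippy::type_complexity)]
fn blocks_inline() -> Result<Vec<(u64, Option<Vec<u8>>)>, String> {
    Ok(Vec::new())
}

// Option 2 (what clippy normally suggests): name the type once with an alias.
type BlockBatch = Result<Vec<(u64, Option<Vec<u8>>)>, String>;

fn blocks_aliased() -> BlockBatch {
    Ok(Vec::new())
}

fn main() {
    assert!(blocks_inline().unwrap().is_empty());
    assert!(blocks_aliased().unwrap().is_empty());
}
```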
1 change: 1 addition & 0 deletions beacon_node/beacon_chain/src/block_verification.rs
@@ -2072,6 +2072,7 @@ pub fn get_validator_pubkey_cache<T: BeaconChainTypes>(
///
/// The signature verifier is empty because it does not yet have any of this block's signatures
/// added to it. Use `Self::apply_to_signature_verifier` to apply the signatures.
#[allow(clippy::type_complexity)]
fn get_signature_verifier<'a, T: BeaconChainTypes>(
state: &'a BeaconState<T::EthSpec>,
validator_pubkey_cache: &'a ValidatorPubkeyCache<T>,
3 changes: 1 addition & 2 deletions beacon_node/beacon_chain/src/eth1_chain.rs
@@ -107,8 +107,7 @@ fn get_sync_status<E: EthSpec>(

// Determine how many voting periods are contained in distance between
// now and genesis, rounding up.
let voting_periods_past =
(seconds_till_genesis + voting_period_duration - 1) / voting_period_duration;
let voting_periods_past = seconds_till_genesis.div_ceil(voting_period_duration);

// Return the start time of the current voting period*.
//
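This hunk, like the similar one in `backoff.rs` further down, swaps the manual round-up idiom `(a + b - 1) / b` for the standard library's integer `div_ceil` (stable since Rust 1.73), which reads more clearly and avoids the potential overflow in the `a + b - 1` addition. A small check of the equivalence, with arbitrary stand-in values:

```rust
fn main() {
    // Arbitrary stand-ins for seconds-till-genesis and the voting period length.
    let seconds_till_genesis: u64 = 12_345;
    let voting_period_duration: u64 = 6_144;

    // Manual round-up idiom removed by the diff.
    let manual = (seconds_till_genesis + voting_period_duration - 1) / voting_period_duration;
    // Standard-library equivalent introduced by the diff.
    let with_div_ceil = seconds_till_genesis.div_ceil(voting_period_duration);

    assert_eq!(manual, with_div_ceil); // both round 12_345 / 6_144 up to 3
    println!("{with_div_ceil} voting periods, rounded up");
}
```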
4 changes: 2 additions & 2 deletions beacon_node/beacon_chain/src/observed_aggregates.rs
@@ -113,7 +113,7 @@ pub trait SubsetItem {
fn root(&self) -> Result<Hash256, Error>;
}

impl<'a, E: EthSpec> SubsetItem for AttestationRef<'a, E> {
impl<E: EthSpec> SubsetItem for AttestationRef<'_, E> {
type Item = BitList<E::MaxValidatorsPerSlot>;
fn is_subset(&self, other: &Self::Item) -> bool {
match self {
@@ -159,7 +159,7 @@ impl<'a, E: EthSpec> SubsetItem for AttestationRef<'a, E> {
}
}

impl<'a, E: EthSpec> SubsetItem for &'a SyncCommitteeContribution<E> {
impl<E: EthSpec> SubsetItem for &SyncCommitteeContribution<E> {
type Item = BitVector<E::SyncSubcommitteeSize>;
fn is_subset(&self, other: &Self::Item) -> bool {
self.aggregation_bits.is_subset(other)
3 changes: 1 addition & 2 deletions beacon_node/lighthouse_network/gossipsub/Cargo.toml
@@ -10,7 +10,7 @@ keywords = ["peer-to-peer", "libp2p", "networking"]
categories = ["network-programming", "asynchronous"]

[features]
wasm-bindgen = ["getrandom/js"]
wasm-bindgen = ["getrandom/js", "futures-timer/wasm-bindgen"]
rsa = []

[dependencies]
@@ -22,7 +22,7 @@ bytes = "1.5"
either = "1.9"
fnv = "1.0.7"
futures = "0.3.30"
futures-ticker = "0.0.3"
futures-timer = "3.0.2"
getrandom = "0.2.12"
hashlink.workspace = true
3 changes: 1 addition & 2 deletions beacon_node/lighthouse_network/gossipsub/src/backoff.rs
@@ -48,8 +48,7 @@ pub(crate) struct BackoffStorage {

impl BackoffStorage {
fn heartbeats(d: &Duration, heartbeat_interval: &Duration) -> usize {
((d.as_nanos() + heartbeat_interval.as_nanos() - 1) / heartbeat_interval.as_nanos())
as usize
d.as_nanos().div_ceil(heartbeat_interval.as_nanos()) as usize
}

pub(crate) fn new(
25 changes: 12 additions & 13 deletions beacon_node/lighthouse_network/gossipsub/src/behaviour.rs
@@ -29,8 +29,7 @@ use std::{
time::Duration,
};

use futures::StreamExt;
use futures_ticker::Ticker;
use futures::FutureExt;
use hashlink::LinkedHashMap;
use prometheus_client::registry::Registry;
use rand::{seq::SliceRandom, thread_rng};
@@ -74,6 +73,7 @@ use super::{
types::RpcOut,
};
use super::{PublishError, SubscriptionError, TopicScoreParams, ValidationError};
use futures_timer::Delay;
use quick_protobuf::{MessageWrite, Writer};
use std::{cmp::Ordering::Equal, fmt::Debug};

@@ -301,7 +301,7 @@ pub struct Behaviour<D = IdentityTransform, F = AllowAllSubscriptionFilter> {
mcache: MessageCache,

/// Heartbeat interval stream.
heartbeat: Ticker,
heartbeat: Delay,

/// Number of heartbeats since the beginning of time; this allows us to amortize some resource
/// clean up -- eg backoff clean up.
@@ -318,7 +318,7 @@ pub struct Behaviour<D = IdentityTransform, F = AllowAllSubscriptionFilter> {
outbound_peers: HashSet<PeerId>,

/// Stores optional peer score data together with thresholds and decay interval.
peer_score: Option<(PeerScore, PeerScoreThresholds, Ticker)>,
peer_score: Option<(PeerScore, PeerScoreThresholds, Delay)>,

/// Counts the number of `IHAVE` received from each peer since the last heartbeat.
count_received_ihave: HashMap<PeerId, usize>,
@@ -466,10 +466,7 @@
config.backoff_slack(),
),
mcache: MessageCache::new(config.history_gossip(), config.history_length()),
heartbeat: Ticker::new_with_next(
config.heartbeat_interval(),
config.heartbeat_initial_delay(),
),
heartbeat: Delay::new(config.heartbeat_interval() + config.heartbeat_initial_delay()),
heartbeat_ticks: 0,
px_peers: HashSet::new(),
outbound_peers: HashSet::new(),
@@ -938,7 +935,7 @@
return Err("Peer score set twice".into());
}

let interval = Ticker::new(params.decay_interval);
let interval = Delay::new(params.decay_interval);
let peer_score = PeerScore::new_with_message_delivery_time_callback(params, callback);
self.peer_score = Some((peer_score, threshold, interval));
Ok(())
@@ -1208,7 +1205,7 @@
}

fn score_below_threshold_from_scores(
peer_score: &Option<(PeerScore, PeerScoreThresholds, Ticker)>,
peer_score: &Option<(PeerScore, PeerScoreThresholds, Delay)>,
peer_id: &PeerId,
threshold: impl Fn(&PeerScoreThresholds) -> f64,
) -> (bool, f64) {
@@ -3427,14 +3424,16 @@
}

// update scores
if let Some((peer_score, _, interval)) = &mut self.peer_score {
while let Poll::Ready(Some(_)) = interval.poll_next_unpin(cx) {
if let Some((peer_score, _, delay)) = &mut self.peer_score {
if delay.poll_unpin(cx).is_ready() {
peer_score.refresh_scores();
delay.reset(peer_score.params.decay_interval);
}
}

while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) {
if self.heartbeat.poll_unpin(cx).is_ready() {
self.heartbeat();
self.heartbeat.reset(self.config.heartbeat_interval());
}

Poll::Pending
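This behaviour change replaces `futures_ticker::Ticker`, a stream that yields ticks on its own, with `futures_timer::Delay`, a one-shot timer that has to be re-armed with `reset` after each expiry; that is why the poll loop above now checks `is_ready()` and then resets the delay. A stripped-down sketch of that polling pattern, with a placeholder struct and none of the libp2p plumbing:

```rust
use std::task::{Context, Poll};
use std::time::Duration;

use futures::FutureExt;
use futures_timer::Delay;

struct Heartbeater {
    interval: Duration,
    heartbeat: Delay,
    ticks: u64,
}

impl Heartbeater {
    fn new(interval: Duration) -> Self {
        Self {
            interval,
            heartbeat: Delay::new(interval),
            ticks: 0,
        }
    }

    /// Meant to be called from a `poll`-style method, as in the behaviour above.
    fn poll_heartbeat(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        if self.heartbeat.poll_unpin(cx).is_ready() {
            self.ticks += 1; // the periodic work goes here
            // Re-arm the one-shot timer for the next interval.
            self.heartbeat.reset(self.interval);
        }
        Poll::Pending
    }
}

fn main() {
    let mut hb = Heartbeater::new(Duration::from_millis(10));
    let waker = futures::task::noop_waker();
    let mut cx = Context::from_waker(&waker);
    // The first poll just arms the timer; a real executor's waker drives later polls.
    assert!(hb.poll_heartbeat(&mut cx).is_pending());
    println!("heartbeats so far: {}", hb.ticks);
}
```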
Diffs for the remaining changed files are not rendered here.