Commit

Merge branch 'main' into ab/sqlx
imabdulbasit authored Oct 11, 2024
2 parents 7a56b6e + b70b7bd · commit cf789f7
Showing 3 changed files with 50 additions and 16 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
@@ -34,4 +34,4 @@ jobs:
         run: cargo fmt -- --check
 
       - name: Check
-        run: cargo clippy --workspace --all-features --all-targets # Removing "-- -D warnings" warning because CI is complaining. TODO add back
+        run: cargo clippy --workspace --all-features --all-targets -- -D warnings
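For context: the trailing `-- -D warnings` passes `-D warnings` through to the compiler, so every warning emitted during the clippy run is treated as a hard error and fails the CI step instead of just printing a diagnostic. A minimal, hypothetical sketch (not code from this repository) of what that changes in practice:

// With a plain `cargo clippy` run this merely prints a warning; with
// `cargo clippy ... -- -D warnings` the same warning is promoted to an
// error, so the build (and therefore the CI job) fails.
fn main() {
    let unused = 42; // rustc `unused_variables` warning -> error under `-D warnings`
    println!("lint gate demo");
}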
6 changes: 3 additions & 3 deletions tests/common/mod.rs
@@ -82,7 +82,7 @@ impl TestConfig {
             .unwrap();
 
         // Varies between v0 and v3.
-        let load_generator_url = if sequencer_version >= 03 {
+        let load_generator_url = if sequencer_version >= 3 {
             url_from_port(dotenvy::var(
                 "ESPRESSO_SUBMIT_TRANSACTIONS_PRIVATE_RESERVE_PORT",
             )?)?
@@ -91,7 +91,7 @@ impl TestConfig {
         };
 
         // TODO test both builders (probably requires some refactoring).
-        let builder_url = if sequencer_version >= 03 {
+        let builder_url = if sequencer_version >= 3 {
            let url = url_from_port(dotenvy::var("ESPRESSO_RESERVE_BUILDER_SERVER_PORT")?)?;
 
            Url::from_str(&url)?
@@ -108,7 +108,7 @@ impl TestConfig {
 
         let l1_provider_url = url_from_port(dotenvy::var("ESPRESSO_SEQUENCER_L1_PORT")?)?;
         let sequencer_api_url = url_from_port(dotenvy::var("ESPRESSO_SEQUENCER1_API_PORT")?)?;
-        let sequencer_clients = vec![
+        let sequencer_clients = [
            dotenvy::var("ESPRESSO_SEQUENCER_API_PORT")?,
            dotenvy::var("ESPRESSO_SEQUENCER1_API_PORT")?,
            dotenvy::var("ESPRESSO_SEQUENCER2_API_PORT")?,
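Both edits in this file look like lint cleanups of the kind the re-enabled `-D warnings` gate would now reject (an assumption; the commit message doesn't say). In Rust, `03` is just the decimal literal `3` with a leading zero, so `sequencer_version >= 03` and `>= 3` behave identically; clippy's `zero_prefixed_literal` lint exists precisely because such literals read like C octal. Likewise, a fixed-size array avoids the heap allocation that `vec![...]` performs when the values are only iterated over, which is what clippy's `useless_vec` lint points out. A small illustrative sketch with made-up values:

fn main() {
    // `03` parses as decimal 3 (Rust has no leading-zero octal; octal is `0o3`),
    // but clippy warns because the leading zero is easy to misread.
    let sequencer_version: u64 = 3;
    assert!(sequencer_version >= 3);

    // An array works anywhere the values are only iterated or borrowed as a
    // slice, and it skips the heap allocation `vec![...]` would make.
    let ports = ["PORT_A", "PORT_B", "PORT_C"]; // placeholder values
    for port in ports {
        println!("sequencer API port env var: {port}");
    }
}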
58 changes: 46 additions & 12 deletions types/src/v0/impls/l1.rs
@@ -80,24 +80,54 @@ impl L1Client {
     }
 
     pub async fn wait_for_block(&self, number: u64) -> L1BlockInfo {
+        // When we are polling the L1 waiting for new blocks, we use a long interval calibrated to
+        // the rate at which L1 blocks are produced.
         let interval = self.provider.get_interval();
+        // When we are retrying after an error, or expecting a block to appear any second, we use a
+        // shorter polling interval.
+        let retry = self.retry_delay;
+
+        // Wait until we expect the block to be available.
+        let l1_head = loop {
+            let l1_head = self.get_block_number().await;
+            if l1_head >= number {
+                // The block should be ready to retrieve.
+                break l1_head;
+            } else if l1_head + 1 == number {
+                // The block we want is the next L1 block. It could be ready any second, so don't
+                // sleep for too long.
+                tracing::info!(number, l1_head, "waiting for next L1 block");
+                sleep(retry).await;
+            } else {
+                // We are waiting at least a few more L1 blocks, so back off and don't spam the RPC.
+                tracing::info!(number, l1_head, "waiting for future L1 block");
+                sleep(interval).await;
+            }
+        };
+
+        // The block should be ready now, but we may still get errors from the RPC, so we retry
+        // until we successfully pull the block.
         loop {
             let block = match self.provider.get_block(number).await {
                 Ok(Some(block)) => block,
                 Ok(None) => {
-                    tracing::info!(number, "no such block");
-                    sleep(interval).await;
+                    tracing::warn!(
+                        number,
+                        l1_head,
+                        "expected L1 block to be available; possible L1 reorg"
+                    );
+                    sleep(retry).await;
                     continue;
                 }
                 Err(err) => {
-                    tracing::error!(%err, number, "failed to get L1 block");
-                    sleep(interval).await;
+                    tracing::error!(number, l1_head, "failed to get L1 block: {err:#}");
+                    sleep(retry).await;
                     continue;
                 }
             };
             let Some(hash) = block.hash else {
-                tracing::error!(number, ?block, "L1 block has no hash");
-                sleep(interval).await;
+                tracing::error!(number, l1_head, ?block, "L1 block has no hash");
+                sleep(retry).await;
                 continue;
             };
             break L1BlockInfo {
@@ -112,29 +142,33 @@
     /// If the desired block number is not finalized yet, this function will block until it becomes
     /// finalized.
     pub async fn wait_for_finalized_block(&self, number: u64) -> L1BlockInfo {
-        let interval = self.provider.get_interval();
+        // First just wait for the block to exist. This uses an efficient polling mechanism that
+        // polls more frequently as we get closer to expecting the block to be ready.
         self.wait_for_block(number).await;
 
-        // Wait for the block to finalize.
+        // Wait for the block to finalize. Since we know the block at least exists, we don't expect
+        // to have to wait _too_ long for it to finalize, so we poll relatively frequently, using
+        // the retry delay instead of the provider interval.
        let finalized = loop {
            let Some(block) = self.get_finalized_block().await else {
                tracing::info!("waiting for finalized block");
-                sleep(interval).await;
+                sleep(self.retry_delay).await;
                continue;
            };
            if block.number >= number {
                break block;
            }
            tracing::info!(current_finalized = %block.number, "waiting for finalized block");
-            sleep(interval).await;
+            sleep(self.retry_delay).await;
            continue;
        };
 
        if finalized.number == number {
            return finalized;
        }
 
-        // The finalized block may have skipped over the block of interest. In this case, our block
-        // is still finalized, since it is before the finalized block. We just need to fetch it.
+        // Load the block again, since it may have changed between first being produced and becoming
+        // finalized.
        self.wait_for_block(number).await
    }

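Stepping back, the new `wait_for_block` logic polls at two cadences: while the target block is still several blocks out it sleeps for the provider's polling interval (roughly the L1 block time), and once the block is imminent, or an RPC call fails after the block should already exist, it falls back to the much shorter `retry_delay`. The same short delay is reused in `wait_for_finalized_block` once the block is known to exist. A self-contained sketch of that two-cadence pattern, with hypothetical names and timings (this is not the repository's code, just the control flow in isolation):

use std::time::Duration;
use tokio::time::sleep;

// Hypothetical stand-in for the L1 RPC client: returns the current head height.
async fn current_head() -> u64 {
    // In the real client this would be an RPC call (`get_block_number`).
    42
}

// Two-cadence wait: a long `interval` while the block is far off, a short
// `retry` once it is expected to land any moment.
async fn wait_until_head_reaches(target: u64, interval: Duration, retry: Duration) -> u64 {
    loop {
        let head = current_head().await;
        if head >= target {
            // The target block should now be retrievable.
            return head;
        } else if head + 1 == target {
            // One block away: it could land any second, so poll quickly.
            sleep(retry).await;
        } else {
            // Several blocks away: back off to roughly the L1 block time
            // so we do not spam the RPC endpoint.
            sleep(interval).await;
        }
    }
}

#[tokio::main]
async fn main() {
    let head = wait_until_head_reaches(
        40,
        Duration::from_secs(12), // assumed ~L1 block time
        Duration::from_secs(1),  // assumed retry delay
    )
    .await;
    println!("head is now {head}");
}

The real implementation additionally re-fetches the block over RPC and handles missing hashes and possible reorgs, as shown in the diff above.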
