feat: add lmdb for block cache to save memory (#229)
stringhandler authored Dec 17, 2024
1 parent fe797c7 commit 74128f6
Showing 10 changed files with 551 additions and 233 deletions.
91 changes: 80 additions & 11 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions Cargo.toml
@@ -62,6 +62,8 @@ thiserror = "1.0"
tokio = { version = "1.41.0", features = ["full"] }
tonic = "0.12.3"
lru = "0.12.5"
tempfile = "3.14.0"
rkv = { version = "0.19.0", features = ["lmdb"] }

[package.metadata.cargo-machete]
ignored = ["log4rs"]
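The block-cache module that actually uses these new dependencies is in files not rendered above. As a rough sketch of the pattern they enable (the store name "blocks", the key/value layout, and the use of tempfile below are illustrative assumptions, not the commit's real code), rkv with the lmdb feature lets serialized blocks be written to an on-disk LMDB environment instead of being held in memory:

```rust
use rkv::backend::Lmdb;
use rkv::{Rkv, StoreOptions, Value};

fn main() -> Result<(), rkv::StoreError> {
    // Hold the LMDB environment in a temp dir for this demo; a real cache would
    // live under the node's data directory.
    let dir = tempfile::TempDir::new().expect("failed to create temp dir");

    // Open (or create) the LMDB environment and a named key-value store inside it.
    let env = Rkv::new::<Lmdb>(dir.path())?;
    let blocks = env.open_single("blocks", StoreOptions::create())?;

    // Write a serialized block keyed by its hash; the bytes live on disk, not in RAM.
    let hash = [0u8; 32];
    let block_bytes = vec![1u8, 2, 3];
    let mut writer = env.write()?;
    blocks.put(&mut writer, &hash, &Value::Blob(block_bytes.as_slice()))?;
    writer.commit()?;

    // Read it back on demand.
    let reader = env.read()?;
    if let Some(Value::Blob(bytes)) = blocks.get(&reader, &hash)? {
        println!("cached block: {} bytes", bytes.len());
    }
    Ok(())
}
```

Keeping only hot entries in memory and spilling full blocks to LMDB is presumably what saves the memory referenced in the commit title, at the cost of a disk read on a cache miss.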
6 changes: 6 additions & 0 deletions src/main.rs
@@ -5,6 +5,7 @@ use std::{
fs::File,
io::Write,
panic,
process,
time::{SystemTime, UNIX_EPOCH},
};

@@ -56,6 +57,11 @@ async fn main() -> anyhow::Result<()> {
let mut file = File::create("panic.log").unwrap();
file.write_all(format!("Panic at {}: {}", location, message).as_bytes())
.unwrap();
if cfg!(debug_assertions) {
// In debug mode, we want to see the panic message
eprintln!("Panic occurred at {}: {}", location, message);
process::exit(500);
}
}));

Cli::parse().handle_command(Shutdown::new().to_signal()).await?;
24 changes: 21 additions & 3 deletions src/server/p2p/network.rs
@@ -2124,14 +2124,24 @@ where S: ShareChain
if num_connections > 20 {
continue;
}
if num_connections == 0 {
match self.dial_seed_peers().await {
Ok(_) => {},
Err(e) => {
warn!(target: LOG_TARGET, "Failed to dial seed peers: {e:?}");
},
}
continue;
}

let mut num_dialed = 0;
let store_read_lock = self.network_peer_store.read().await;
// Rather try and search good peers rather than randomly dialing
// 1000 peers will take a long time to get through
for record in store_read_lock.whitelist_peers().values() {
// Only dial seed peers if we have 0 connections
if !self.swarm.is_connected(&record.peer_id)
&& (num_connections == 0 || !store_read_lock.is_seed_peer(&record.peer_id)) {
&& !store_read_lock.is_seed_peer(&record.peer_id) {
let _unused = self.swarm.dial(record.peer_id);
num_dialed += 1;
// We can only do 30 connections
@@ -2430,6 +2440,13 @@ where S: ShareChain
self.query_tx.clone()
}

pub async fn dial_seed_peers(&mut self) -> Result<(), Error> {
info!(target: LOG_TARGET, squad = &self.config.squad; "Dialing seed peers...");
let seed_peers = self.parse_seed_peers().await?;
self.join_seed_peers(seed_peers).await?;
Ok(())
}

/// Starts p2p service.
/// Please note that this is a blocking call!
pub async fn start(&mut self) -> Result<(), Error> {
@@ -2457,8 +2474,9 @@
}
self.subscribe_to_topics().await;

let seed_peers = self.parse_seed_peers().await?;
self.join_seed_peers(seed_peers).await?;
self.dial_seed_peers().await?;
// let seed_peers = self.parse_seed_peers().await?;
// self.join_seed_peers(seed_peers).await?;

// start initial share chain sync
// let in_progress = self.sync_in_progress.clone();