Skip to content

Commit

Permalink
feat(primitives): new type for sector number (#585)
Browse files Browse the repository at this point in the history
Co-authored-by: jmg-duarte <[email protected]>
  • Loading branch information
cernicc and jmg-duarte authored Nov 21, 2024
1 parent 5b5bdd8 commit 9d524a8
Show file tree
Hide file tree
Showing 33 changed files with 717 additions and 607 deletions.
287 changes: 144 additions & 143 deletions Cargo.lock

Large diffs are not rendered by default.

Binary file modified cli/artifacts/metadata.scale
Binary file not shown.
4 changes: 2 additions & 2 deletions cli/polka-storage-provider/client/src/commands/proofs.rs
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,7 @@ impl ProofsCommand {

// Those are hardcoded for the showcase only.
// They should come from Storage Provider Node, precommits and other information.
let sector_id = 77;
let sector_id = 77.into();
let ticket = [12u8; 32];
let seed = [13u8; 32];

Expand Down Expand Up @@ -369,7 +369,7 @@ impl ProofsCommand {

// Those are hardcoded for the showcase only.
// They should come from Storage Provider Node, precommits and other information.
let sector_id = 77;
let sector_id = 77.into();
let randomness = [1u8; 32];

let output_path = if let Some(output_path) = output_path {
Expand Down
32 changes: 18 additions & 14 deletions cli/polka-storage-provider/server/src/db.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use std::{
path::Path,
sync::atomic::{AtomicU64, Ordering},
sync::atomic::{AtomicU32, Ordering},
};

use primitives_proofs::SectorNumber;
Expand Down Expand Up @@ -36,7 +36,7 @@ const COLUMN_FAMILIES: [&str; 2] = [ACCEPTED_DEAL_PROPOSALS_CF, SECTORS_CF];

pub struct DealDB {
database: RocksDB,
last_sector_number: AtomicU64,
last_sector_number: AtomicU32,
}

impl DealDB {
Expand All @@ -54,7 +54,7 @@ impl DealDB {

let db = Self {
database: RocksDB::open_cf_descriptors(&opts, path, cfs)?,
last_sector_number: AtomicU64::new(0),
last_sector_number: AtomicU32::new(0),
};

db.initialize_biggest_sector_number()?;
Expand Down Expand Up @@ -124,9 +124,10 @@ impl DealDB {
&self,
sector_number: SectorNumber,
) -> Result<Option<SectorType>, DBError> {
let Some(sector_slice) = self
.database
.get_pinned_cf(self.cf_handle(SECTORS_CF), sector_number.to_le_bytes())?
let Some(sector_slice) = self.database.get_pinned_cf(
self.cf_handle(SECTORS_CF),
u32::from(sector_number).to_le_bytes(),
)?
else {
return Ok(None);
};
Expand All @@ -143,7 +144,7 @@ impl DealDB {
sector: &SectorType,
) -> Result<(), DBError> {
let cf_handle = self.cf_handle(SECTORS_CF);
let key = sector_number.to_le_bytes();
let key = u32::from(sector_number).to_le_bytes();
let json = serde_json::to_vec(&sector)?;

self.database.put_cf(cf_handle, key, json)?;
Expand All @@ -157,35 +158,38 @@ impl DealDB {
/// After that, `last_sector_number` is only ever incremented via `next_sector_number`.
/// If this function were called from multiple threads after initialization, it could cause a race condition and data erasure.
fn initialize_biggest_sector_number(&self) -> Result<(), DBError> {
let mut biggest_sector_number = 0;
let mut biggest_sector_number = 0.into();
for item in self
.database
.iterator_cf(self.cf_handle(SECTORS_CF), rocksdb::IteratorMode::Start)
{
let (key, _) = item?;
let key: [u8; 8] = key
let key: [u8; 4] = key
.as_ref()
.try_into()
.expect("sector's key to be u64 le bytes");
let sector_id = SectorNumber::from_le_bytes(key);
.expect("sector's key to be u32 le bytes");
// Unwrap safe. Can only fail if the sector number was manually
// inserted in the database.
let sector_id =
SectorNumber::new(u32::from_le_bytes(key)).expect("valid sector number");
biggest_sector_number = std::cmp::max(biggest_sector_number, sector_id);
}

// [`Ordering::Relaxed`] can be used here as this function is executed only once, at start-up.
// We don't mind; it's just an initialization step.
self.last_sector_number
.store(biggest_sector_number, Ordering::Relaxed);
.store(biggest_sector_number.into(), Ordering::Relaxed);
Ok(())
}

/// Atomically increments sector_id counter, so it can be used as an identifier by a sector.
/// Prior to all of the calls to this function, `initialize_biggest_sector_id` must be called at the node start-up.
pub fn next_sector_number(&self) -> SectorNumber {
pub fn next_sector_number(&self) -> Result<SectorNumber, &'static str> {
// [`Ordering::Relaxed`] can be used here, as it's an update on a single variable.
// It does not depend on other Atomic variables and it does not matter which thread makes it first.
// We just need it to be different on every thread that calls it concurrently, so the ids are not duplicated.
let previous = self.last_sector_number.fetch_add(1, Ordering::Relaxed);
previous + 1
SectorNumber::try_from(previous + 1)
}

// NOTE(@jmg-duarte,03/10/2024): I think that from here onwards we're very close of reinventing the LID, but so be it
Expand Down
7 changes: 6 additions & 1 deletion cli/polka-storage-provider/server/src/pipeline/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,8 @@ pub enum PipelineError {
RandomnessNotAvailable,
#[error(transparent)]
SendError(#[from] SendError<PipelineMessage>),
#[error("Custom error: {0}")]
CustomError(String),
}
/// Pipeline shared state.
pub struct PipelineState {
Expand Down Expand Up @@ -191,7 +193,10 @@ async fn find_sector_for_piece(
) -> Result<UnsealedSector, PipelineError> {
// TODO(@th7nder,30/10/2024): simplification, we're always creating a new sector for storing a piece.
// It should not work like that, sectors should be filled with pieces according to *some* algorithm.
let sector_number = state.db.next_sector_number();
let sector_number = state
.db
.next_sector_number()
.map_err(|err| PipelineError::CustomError(err.to_string()))?;
let unsealed_path = state.unsealed_sectors_dir.join(sector_number.to_string());
let sector = UnsealedSector::create(sector_number, unsealed_path).await?;

Expand Down
8 changes: 6 additions & 2 deletions cli/polka-storage/storagext/src/runtime/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,10 @@ pub mod display;
path = "primitives_proofs::types::SectorSize",
with = "::primitives_proofs::SectorSize",
),
substitute_type(
path = "primitives_proofs::types::SectorNumber",
with = "::primitives_proofs::SectorNumber",
),
// impl Deserialize
derive_for_type(
path = "pallet_market::pallet::ActiveDealState",
Expand Down Expand Up @@ -131,7 +135,7 @@ mod test {
)
.unwrap();

assert_eq!(active_deal_state.sector_number, 1);
assert_eq!(active_deal_state.sector_number, 1.into());
assert_eq!(active_deal_state.sector_start_block, 10);
assert_eq!(active_deal_state.last_updated_block, Some(20));
assert_eq!(active_deal_state.slash_block, None);
Expand Down Expand Up @@ -161,7 +165,7 @@ mod test {
assert_eq!(
deal_state,
DealState::Active(ActiveDealState {
sector_number: 1,
sector_number: 1.into(),
sector_start_block: 10,
last_updated_block: Some(20),
slash_block: None
Expand Down
16 changes: 8 additions & 8 deletions cli/polka-storage/storagext/src/types/storage_provider.rs
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ impl From<ProveCommitSector> for RuntimeProveCommitSector {
pub struct FaultDeclaration {
pub deadline: u64,
pub partition: u32,
pub sectors: BTreeSet<u64>,
pub sectors: BTreeSet<SectorNumber>,
}

impl From<FaultDeclaration> for RuntimeFaultDeclaration {
Expand Down Expand Up @@ -201,7 +201,7 @@ impl PartialEq<RuntimeFaultDeclaration> for FaultDeclaration {
pub struct RecoveryDeclaration {
pub deadline: u64,
pub partition: u32,
pub sectors: BTreeSet<u64>,
pub sectors: BTreeSet<SectorNumber>,
}

impl From<RecoveryDeclaration> for RuntimeRecoveryDeclaration {
Expand Down Expand Up @@ -286,7 +286,7 @@ impl Into<RuntimeSubmitWindowedPoStParams> for SubmitWindowedPoStParams {
pub struct TerminationDeclaration {
pub deadline: u64,
pub partition: u32,
pub sectors: BTreeSet<u64>,
pub sectors: BTreeSet<SectorNumber>,
}

impl From<TerminationDeclaration> for RuntimeTerminationDeclaration {
Expand Down Expand Up @@ -340,7 +340,7 @@ mod tests {
let expected = FaultDeclaration {
deadline: 0,
partition: 0,
sectors: BTreeSet::from_iter([0, 1].into_iter()),
sectors: BTreeSet::from_iter([0.into(), 1.into()].into_iter()),
};
assert_eq!(expected, result);
}
Expand All @@ -358,7 +358,7 @@ mod tests {
let expected = vec![FaultDeclaration {
deadline: 0,
partition: 0,
sectors: BTreeSet::from_iter([0, 1].into_iter()),
sectors: BTreeSet::from_iter([0.into(), 1.into()].into_iter()),
}];
assert_eq!(expected, result);
}
Expand All @@ -376,7 +376,7 @@ mod tests {
let expected = RecoveryDeclaration {
deadline: 0,
partition: 0,
sectors: BTreeSet::from_iter([0, 1].into_iter()),
sectors: BTreeSet::from_iter([0.into(), 1.into()].into_iter()),
};
assert_eq!(expected, result);
}
Expand All @@ -394,7 +394,7 @@ mod tests {
let expected = vec![RecoveryDeclaration {
deadline: 0,
partition: 0,
sectors: BTreeSet::from_iter([0, 1].into_iter()),
sectors: BTreeSet::from_iter([0.into(), 1.into()].into_iter()),
}];
assert_eq!(expected, result);
}
Expand Down Expand Up @@ -500,7 +500,7 @@ mod tests {
vec![TerminationDeclaration {
deadline: 69,
partition: 420,
sectors: BTreeSet::from([1, 2]),
sectors: BTreeSet::from([1.into(), 2.into()]),
}]
)
}
Expand Down
2 changes: 1 addition & 1 deletion lib/polka-storage-proofs/src/porep/sealer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,7 @@ impl Sealer {
cache_path: CacheDirectory,
replica_path: SealedSector,
prover_id: ProverId,
sector_id: u64,
sector_id: SectorNumber,
ticket: Ticket,
seed: Option<Ticket>,
pre_commit: PreCommitOutput,
Expand Down
14 changes: 7 additions & 7 deletions maat/tests/real_world.rs
Original file line number Diff line number Diff line change
Expand Up @@ -178,8 +178,8 @@ async fn publish_storage_deals<Keypair>(
label,
start_block: 85,
end_block: 165,
storage_price_per_block: 300000000,
provider_collateral: 12500000000,
storage_price_per_block: 300_000_000,
provider_collateral: 12_500_000_000,
state: DealState::Published,
};

Expand Down Expand Up @@ -232,7 +232,7 @@ where

let sectors_pre_commit_info = vec![SectorPreCommitInfo {
seal_proof: primitives_proofs::RegisteredSealProof::StackedDRG2KiBV1P1,
sector_number: 1,
sector_number: 1.into(),
sealed_cid,
deal_ids: vec![0],
expiration: 195,
Expand Down Expand Up @@ -268,7 +268,7 @@ async fn prove_commit_sector<Keypair>(
Keypair: subxt::tx::Signer<PolkaStorageConfig>,
{
let expected_results = vec![ProveCommitResult {
sector_number: 1,
sector_number: 1.into(),
partition_number: 0,
deadline_idx: 0,
}];
Expand Down Expand Up @@ -298,7 +298,7 @@ async fn prove_commit_sector<Keypair>(
.prove_commit_sectors(
charlie,
vec![ProveCommitSector {
sector_number: 1,
sector_number: 1.into(),
proof,
}
.into()],
Expand Down Expand Up @@ -360,7 +360,7 @@ where
let recovery_declarations = vec![RecoveryDeclaration {
deadline: 0,
partition: 0,
sectors: BTreeSet::from_iter([1u64].into_iter()),
sectors: BTreeSet::from_iter([1.into()].into_iter()),
}];
let faults_recovered_result = client
.declare_faults_recovered(charlie, recovery_declarations.clone(), true)
Expand All @@ -385,7 +385,7 @@ where
let fault_declarations = vec![FaultDeclaration {
deadline: 0,
partition: 0,
sectors: BTreeSet::from_iter([1u64].into_iter()),
sectors: BTreeSet::from_iter([1.into()].into_iter()),
}];
let fault_declaration_result = client
.declare_faults(charlie, fault_declarations.clone(), true)
Expand Down
6 changes: 2 additions & 4 deletions pallets/market/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1339,10 +1339,8 @@ pub mod pallet {

let mut pending_proposals = PendingProposals::<T>::get();
for sector in sector_deals {
let mut sector_activated_deal_ids: BoundedVec<
SectorNumber,
ConstU32<MAX_DEALS_PER_SECTOR>,
> = BoundedVec::new();
let mut sector_activated_deal_ids: BoundedVec<u64, ConstU32<MAX_DEALS_PER_SECTOR>> =
BoundedVec::new();

let Ok(proposals) = Self::proposals_for_deals(sector.deal_ids) else {
log::error!("failed to find deals for sector: {}", sector.sector_number);
Expand Down
Loading

0 comments on commit 9d524a8

Please sign in to comment.