From c2fffb988711abfee580040ac52bed083302b89b Mon Sep 17 00:00:00 2001
From: Luke Parker
Date: Sun, 17 Dec 2023 02:06:51 -0500
Subject: [PATCH] Correct a couple years of accumulated typos

---
 coins/bitcoin/src/rpc.rs                      |  2 +-
 coins/bitcoin/src/wallet/send.rs              |  2 +-
 coins/monero/src/wallet/scan.rs               |  2 +-
 common/db/src/create_db.rs                    |  2 +-
 common/db/src/mem.rs                          |  2 +-
 coordinator/src/substrate/cosign.rs           |  2 +-
 coordinator/src/tributary/handle.rs           |  2 +-
 coordinator/src/tributary/signing_protocol.rs |  2 +-
 coordinator/tributary/src/tests/blockchain.rs |  2 +-
 crypto/ciphersuite/src/lib.rs                 |  2 +-
 crypto/dalek-ff-group/src/field.rs            |  2 +-
 crypto/dkg/src/encryption.rs                  |  2 +-
 crypto/dleq/src/cross_group/scalar.rs         |  2 +-
 crypto/dleq/src/lib.rs                        |  2 +-
 crypto/ed448/src/backend.rs                   |  2 +-
 crypto/frost/src/algorithm.rs                 |  2 +-
 crypto/frost/src/tests/vectors.rs             |  2 +-
 crypto/multiexp/Cargo.toml                    |  2 +-
 crypto/multiexp/README.md                     |  2 +-
 crypto/multiexp/src/lib.rs                    |  4 ++--
 crypto/multiexp/src/pippenger.rs              |  2 +-
 crypto/multiexp/src/straus.rs                 |  2 +-
 crypto/schnorr/src/lib.rs                     |  2 +-
 message-queue/src/messages.rs                 |  2 +-
 orchestration/coordinator/Dockerfile          |  4 ++--
 .../coordinator/Dockerfile.coordinator.end    |  4 ++--
 .../processor/Dockerfile.processor.end        |  4 ++--
 orchestration/processor/bitcoin/Dockerfile    |  4 ++--
 orchestration/processor/monero/Dockerfile     |  4 ++--
 processor/messages/src/lib.rs                 | 10 +++++-----
 processor/src/key_gen.rs                      |  2 +-
 processor/src/multisigs/scanner.rs            |  2 +-
 processor/src/multisigs/scheduler.rs          |  4 ++--
 processor/src/networks/bitcoin.rs             |  4 ++--
 processor/src/plan.rs                         |  4 ++--
 processor/src/tests/scanner.rs                |  2 +-
 substrate/primitives/src/networks.rs          |  2 +-
 substrate/signals/pallet/src/lib.rs           | 18 +++++++++---------
 tests/processor/src/tests/key_gen.rs          |  2 +-
 tests/processor/src/tests/send.rs             |  6 +++---
 40 files changed, 63 insertions(+), 63 deletions(-)

diff --git a/coins/bitcoin/src/rpc.rs b/coins/bitcoin/src/rpc.rs
index 38b53cc76..6778636bd 100644
--- a/coins/bitcoin/src/rpc.rs
+++ b/coins/bitcoin/src/rpc.rs
@@ -84,7 +84,7 @@ impl Rpc {
     for line in res.split('\n') {
       // This doesn't check if the arguments are as expected
       // This is due to Bitcoin supporting a large amount of optional arguments, which
-      // occassionally change, with their own mechanism of text documentation, making matching off
+      // occasionally change, with their own mechanism of text documentation, making matching off
       // it a quite involved task
       // Instead, once we've confirmed the methods are present, we assume our arguments are aligned
       // Else we'll error at time of call
diff --git a/coins/bitcoin/src/wallet/send.rs b/coins/bitcoin/src/wallet/send.rs
index 9d98f9fad..f4cfa3b5d 100644
--- a/coins/bitcoin/src/wallet/send.rs
+++ b/coins/bitcoin/src/wallet/send.rs
@@ -186,7 +186,7 @@ impl SignableTransaction {
     // src/policy/policy.cpp#L295-L298
     // implements this as expected

-    // Technically, it takes whatever's greater, the weight or the amount of signature operatons
+    // Technically, it takes whatever's greater, the weight or the amount of signature operations
     // multiplied by DEFAULT_BYTES_PER_SIGOP (20)
     // We only use 1 signature per input, and our inputs have a weight exceeding 20
     // Accordingly, our inputs' weight will always be greater than the cost of the signature ops
diff --git a/coins/monero/src/wallet/scan.rs b/coins/monero/src/wallet/scan.rs
index 71e949909..db8a2ae54 100644
--- a/coins/monero/src/wallet/scan.rs
+++ b/coins/monero/src/wallet/scan.rs
@@ -397,7 +397,7 @@ impl Scanner {
       }
       let subaddress = *subaddress.unwrap();

-      // If it has torsion, it'll substract the non-torsioned shared key to a torsioned key
+      // If it has torsion, it'll subtract the non-torsioned shared key to a torsioned key
       // We will not have a torsioned key in our HashMap of keys, so we wouldn't identify it as
       // ours
       // If we did though, it'd enable bypassing the included burning bug protection
diff --git a/common/db/src/create_db.rs b/common/db/src/create_db.rs
index a1e182a9f..2f3626d21 100644
--- a/common/db/src/create_db.rs
+++ b/common/db/src/create_db.rs
@@ -22,7 +22,7 @@ pub fn serai_db_key(
 ///
 /// * `db_name` - A database name
 /// * `field_name` - An item name
-/// * `args` - Comma seperated list of key arguments
+/// * `args` - Comma separated list of key arguments
 /// * `field_type` - The return type
 ///
 /// # Example
diff --git a/common/db/src/mem.rs b/common/db/src/mem.rs
index f52408693..ecac300ec 100644
--- a/common/db/src/mem.rs
+++ b/common/db/src/mem.rs
@@ -6,7 +6,7 @@ use std::{

 use crate::*;

-/// An atomic operation for the in-memory databae.
+/// An atomic operation for the in-memory database.
 #[must_use]
 #[derive(PartialEq, Eq, Debug)]
 pub struct MemDbTxn<'a>(&'a MemDb, HashMap, Vec>, HashSet>);
diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs
index 7d2b41d3d..2443c8116 100644
--- a/coordinator/src/substrate/cosign.rs
+++ b/coordinator/src/substrate/cosign.rs
@@ -140,7 +140,7 @@ async fn potentially_cosign_block(

   // If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks
   // trigger a cosigning protocol covering it
-  // This means there will be the maximum delay allowed from a block needing cosigning occuring
+  // This means there will be the maximum delay allowed from a block needing cosigning occurring
   // and a cosign for it triggering
   let maximally_latent_cosign_block =
     skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);
diff --git a/coordinator/src/tributary/handle.rs b/coordinator/src/tributary/handle.rs
index 8ac1d2c38..df2d284ef 100644
--- a/coordinator/src/tributary/handle.rs
+++ b/coordinator/src/tributary/handle.rs
@@ -42,7 +42,7 @@ pub fn dkg_confirmation_nonces(
     .preprocess()
 }

-// If there's an error generating a key pair, return any errors which would've occured when
+// If there's an error generating a key pair, return any errors which would've occurred when
 // executing the DkgConfirmer in order to stay in sync with those who did.
 //
 // The caller must ensure only error_generating_key_pair or generated_key_pair is called for a
diff --git a/coordinator/src/tributary/signing_protocol.rs b/coordinator/src/tributary/signing_protocol.rs
index 45d95b1a9..94e26cb0d 100644
--- a/coordinator/src/tributary/signing_protocol.rs
+++ b/coordinator/src/tributary/signing_protocol.rs
@@ -38,7 +38,7 @@
   only way to operate on distinct received messages would be if:

   1) A logical flaw exists, letting new messages over write prior messages
-  2) A reorganization occured from chain A to chain B, and with it, different messages
+  2) A reorganization occurred from chain A to chain B, and with it, different messages

   Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While a
   significant amount of processes may be byzantine, leading to BFT being broken, that still will
diff --git a/coordinator/tributary/src/tests/blockchain.rs b/coordinator/tributary/src/tests/blockchain.rs
index 137ed222d..6103a62f4 100644
--- a/coordinator/tributary/src/tests/blockchain.rs
+++ b/coordinator/tributary/src/tests/blockchain.rs
@@ -74,7 +74,7 @@ fn invalid_block() {
     assert!(blockchain.verify_block::(&block, &validators, false).is_err());
   }

-  // Mutate tranactions merkle
+  // Mutate transactions merkle
   {
     let mut block = block;
     block.header.transactions = Blake2s256::digest(block.header.transactions).into();
diff --git a/crypto/ciphersuite/src/lib.rs b/crypto/ciphersuite/src/lib.rs
index 3954047d8..e5ea66453 100644
--- a/crypto/ciphersuite/src/lib.rs
+++ b/crypto/ciphersuite/src/lib.rs
@@ -52,7 +52,7 @@ pub trait Ciphersuite:
   /// Group element type.
   type G: Group + GroupOps + PrimeGroup + Zeroize + ConstantTimeEq;
   /// Hash algorithm used with this curve.
-  // Requires BlockSizeUser so it can be used within Hkdf which requies that.
+  // Requires BlockSizeUser so it can be used within Hkdf which requires that.
   type H: Send + Clone + BlockSizeUser + Digest + HashMarker + SecureDigest;

   /// ID for this curve.
diff --git a/crypto/dalek-ff-group/src/field.rs b/crypto/dalek-ff-group/src/field.rs
index 2e83f7bf5..b1af27114 100644
--- a/crypto/dalek-ff-group/src/field.rs
+++ b/crypto/dalek-ff-group/src/field.rs
@@ -222,7 +222,7 @@ impl FieldElement {
     FieldElement(reduce(U512::from(value.mul_wide(&value))))
   }

-  /// Perform an exponentation.
+  /// Perform an exponentiation.
   pub fn pow(&self, other: FieldElement) -> FieldElement {
     let mut table = [FieldElement::ONE; 16];
     table[1] = *self;
diff --git a/crypto/dkg/src/encryption.rs b/crypto/dkg/src/encryption.rs
index 8c5d8612f..51cf6b060 100644
--- a/crypto/dkg/src/encryption.rs
+++ b/crypto/dkg/src/encryption.rs
@@ -118,7 +118,7 @@ fn cipher(context: &str, ecdh: &Zeroizing) -> ChaCha20 {
   zeroize(challenge.as_mut());

   // Since the key is single-use, it doesn't matter what we use for the IV
-  // The isssue is key + IV reuse. If we never reuse the key, we can't have the opportunity to
+  // The issue is key + IV reuse. If we never reuse the key, we can't have the opportunity to
   // reuse a nonce
   // Use a static IV in acknowledgement of this
   let mut iv = Cc20Iv::default();
diff --git a/crypto/dleq/src/cross_group/scalar.rs b/crypto/dleq/src/cross_group/scalar.rs
index d57991b9a..8f216a882 100644
--- a/crypto/dleq/src/cross_group/scalar.rs
+++ b/crypto/dleq/src/cross_group/scalar.rs
@@ -20,7 +20,7 @@ pub fn scalar_normalize(
   let mut res1 = F0::ZERO;
   let mut res2 = F1::ZERO;
-  // Uses the bits API to ensure a consistent endianess
+  // Uses the bits API to ensure a consistent endianness
   let mut bits = scalar.to_le_bits();
   scalar.zeroize();

   // Convert it to big endian
diff --git a/crypto/dleq/src/lib.rs b/crypto/dleq/src/lib.rs
index ae5391927..5b813b648 100644
--- a/crypto/dleq/src/lib.rs
+++ b/crypto/dleq/src/lib.rs
@@ -28,7 +28,7 @@ mod tests;

 pub(crate) fn challenge(transcript: &mut T) -> F {
   // From here, there are three ways to get a scalar under the ff/group API
   // 1: Scalar::random(ChaCha20Rng::from_seed(self.transcript.rng_seed(b"challenge")))
-  // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianess
+  // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianness
   //    and loading it in
   // 3: Iterating over each byte and manually doubling/adding. This is simplest
diff --git a/crypto/ed448/src/backend.rs b/crypto/ed448/src/backend.rs
index 00f87a1c8..83dc3fcaa 100644
--- a/crypto/ed448/src/backend.rs
+++ b/crypto/ed448/src/backend.rs
@@ -139,7 +139,7 @@ macro_rules! field {
     }

     impl $FieldName {
-      /// Perform an exponentation.
+      /// Perform an exponentiation.
       pub fn pow(&self, other: $FieldName) -> $FieldName {
         let mut table = [Self(Residue::ONE); 16];
         table[1] = *self;
diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs
index 59aa2dd9b..f2da59ea1 100644
--- a/crypto/frost/src/algorithm.rs
+++ b/crypto/frost/src/algorithm.rs
@@ -51,7 +51,7 @@ pub trait Algorithm: Send + Sync + Clone {
   /// Read an addendum from a reader.
   fn read_addendum(&self, reader: &mut R) -> io::Result;

-  /// Proccess the addendum for the specified participant. Guaranteed to be called in order.
+  /// Process the addendum for the specified participant. Guaranteed to be called in order.
   fn process_addendum(
     &mut self,
     params: &ThresholdView,
diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs
index 275e36f52..3356a6cdd 100644
--- a/crypto/frost/src/tests/vectors.rs
+++ b/crypto/frost/src/tests/vectors.rs
@@ -43,7 +43,7 @@ pub struct Vectors {
 }

 // Vectors are expected to be formatted per the IETF proof of concept
-// The included vectors are direcly from
+// The included vectors are directly from
 // https://github.com/cfrg/draft-irtf-cfrg-frost/tree/draft-irtf-cfrg-frost-14/poc
 #[cfg(test)]
 impl From for Vectors {
diff --git a/crypto/multiexp/Cargo.toml b/crypto/multiexp/Cargo.toml
index 95dcebaa0..27b47ea9e 100644
--- a/crypto/multiexp/Cargo.toml
+++ b/crypto/multiexp/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "multiexp"
 version = "0.4.0"
-description = "Multiexponentation algorithms for ff/group"
+description = "Multiexponentiation algorithms for ff/group"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/crypto/multiexp"
 authors = ["Luke Parker "]
diff --git a/crypto/multiexp/README.md b/crypto/multiexp/README.md
index 4ee3c56c2..1366f7a6b 100644
--- a/crypto/multiexp/README.md
+++ b/crypto/multiexp/README.md
@@ -2,7 +2,7 @@

 A multiexp implementation for ff/group implementing Straus and Pippenger. A
 batch verification API is also available via the "batch" feature, which enables
-secure multiexponentation batch verification given a series of values which
+secure multiexponentiation batch verification given a series of values which
 should sum to the identity, identifying which doesn't via binary search if they
 don't.

diff --git a/crypto/multiexp/src/lib.rs b/crypto/multiexp/src/lib.rs
index 8d619b61e..cf0133fca 100644
--- a/crypto/multiexp/src/lib.rs
+++ b/crypto/multiexp/src/lib.rs
@@ -173,7 +173,7 @@ fn algorithm(len: usize) -> Algorithm {
   }
 }

-/// Performs a multiexponentation, automatically selecting the optimal algorithm based on the
+/// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the
 /// amount of pairs.
 pub fn multiexp(pairs: &[(G::Scalar, G)]) -> G
 where
@@ -188,7 +188,7 @@ where
   }
 }

-/// Performs a multiexponentation in variable time, automatically selecting the optimal algorithm
+/// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm
 /// based on the amount of pairs.
 pub fn multiexp_vartime(pairs: &[(G::Scalar, G)]) -> G
 where
diff --git a/crypto/multiexp/src/pippenger.rs b/crypto/multiexp/src/pippenger.rs
index e182d51ea..10d7d1410 100644
--- a/crypto/multiexp/src/pippenger.rs
+++ b/crypto/multiexp/src/pippenger.rs
@@ -5,7 +5,7 @@ use group::Group;

 use crate::prep_bits;

-// Pippenger's algorithm for multiexponentation, as published in the SIAM Journal on Computing
+// Pippenger's algorithm for multiexponentiation, as published in the SIAM Journal on Computing
 // DOI: 10.1137/0209022
 pub(crate) fn pippenger(pairs: &[(G::Scalar, G)], window: u8) -> G
 where
diff --git a/crypto/multiexp/src/straus.rs b/crypto/multiexp/src/straus.rs
index 4eadd2130..6f472c057 100644
--- a/crypto/multiexp/src/straus.rs
+++ b/crypto/multiexp/src/straus.rs
@@ -22,7 +22,7 @@ fn prep_tables(pairs: &[(G::Scalar, G)], window: u8) -> Vec> {
   tables
 }

-// Straus's algorithm for multiexponentation, as published in The American Mathematical Monthly
+// Straus's algorithm for multiexponentiation, as published in The American Mathematical Monthly
 // DOI: 10.2307/2310929
 pub(crate) fn straus(pairs: &[(G::Scalar, G)], window: u8) -> G
 where
diff --git a/crypto/schnorr/src/lib.rs b/crypto/schnorr/src/lib.rs
index 282f7c18e..ecca87f73 100644
--- a/crypto/schnorr/src/lib.rs
+++ b/crypto/schnorr/src/lib.rs
@@ -83,7 +83,7 @@ impl SchnorrSignature {
   }

   /// Return the series of pairs whose products sum to zero for a valid signature.
-  /// This is inteded to be used with a multiexp.
+  /// This is intended to be used with a multiexp.
   pub fn batch_statements(&self, public_key: C::G, challenge: C::F) -> [(C::F, C::G); 3] {
     // s = r + ca
     // sG == R + cA
diff --git a/message-queue/src/messages.rs b/message-queue/src/messages.rs
index 65c18dd20..942f3ff51 100644
--- a/message-queue/src/messages.rs
+++ b/message-queue/src/messages.rs
@@ -61,7 +61,7 @@ pub fn ack_challenge(
   id: u64,
   nonce: ::G,
 ) -> ::F {
-  let mut transcript = RecommendedTranscript::new(b"Serai Message Queue v0.1 Ackowledgement");
+  let mut transcript = RecommendedTranscript::new(b"Serai Message Queue v0.1 Acknowledgement");
   transcript.domain_separate(b"metadata");
   transcript.append_message(b"to", borsh::to_vec(&to).unwrap());
   transcript.append_message(b"to_key", to_key.to_bytes());
diff --git a/orchestration/coordinator/Dockerfile b/orchestration/coordinator/Dockerfile
index e58301fb8..ff368cd25 100644
--- a/orchestration/coordinator/Dockerfile
+++ b/orchestration/coordinator/Dockerfile
@@ -65,8 +65,8 @@ USER coordinator
 WORKDIR /home/coordinator

 # Copy the Coordinator binary and relevant license
-COPY --from=builder --chown=processsor /serai/bin/serai-coordinator /bin/
-COPY --from=builder --chown=processsor /serai/AGPL-3.0 .
+COPY --from=builder --chown=processor /serai/bin/serai-coordinator /bin/
+COPY --from=builder --chown=processor /serai/AGPL-3.0 .

 # Run coordinator
 CMD ["serai-coordinator"]
diff --git a/orchestration/coordinator/Dockerfile.coordinator.end b/orchestration/coordinator/Dockerfile.coordinator.end
index c96e49f2a..9c8bcd3dc 100644
--- a/orchestration/coordinator/Dockerfile.coordinator.end
+++ b/orchestration/coordinator/Dockerfile.coordinator.end
@@ -8,8 +8,8 @@ USER coordinator
 WORKDIR /home/coordinator

 # Copy the Coordinator binary and relevant license
-COPY --from=builder --chown=processsor /serai/bin/serai-coordinator /bin/
-COPY --from=builder --chown=processsor /serai/AGPL-3.0 .
+COPY --from=builder --chown=processor /serai/bin/serai-coordinator /bin/
+COPY --from=builder --chown=processor /serai/AGPL-3.0 .

 # Run coordinator
 CMD ["serai-coordinator"]
diff --git a/orchestration/processor/Dockerfile.processor.end b/orchestration/processor/Dockerfile.processor.end
index cd0e6e970..410ba5e80 100644
--- a/orchestration/processor/Dockerfile.processor.end
+++ b/orchestration/processor/Dockerfile.processor.end
@@ -8,8 +8,8 @@ USER processor
 WORKDIR /home/processor

 # Copy the Processor binary and relevant license
-COPY --from=builder --chown=processsor /serai/bin/serai-processor /bin/
-COPY --from=builder --chown=processsor /serai/AGPL-3.0 .
+COPY --from=builder --chown=processor /serai/bin/serai-processor /bin/
+COPY --from=builder --chown=processor /serai/AGPL-3.0 .

 # Run processor
 CMD ["serai-processor"]
diff --git a/orchestration/processor/bitcoin/Dockerfile b/orchestration/processor/bitcoin/Dockerfile
index 28c3d15c1..5e77e4736 100644
--- a/orchestration/processor/bitcoin/Dockerfile
+++ b/orchestration/processor/bitcoin/Dockerfile
@@ -65,8 +65,8 @@ USER processor
 WORKDIR /home/processor

 # Copy the Processor binary and relevant license
-COPY --from=builder --chown=processsor /serai/bin/serai-processor /bin/
-COPY --from=builder --chown=processsor /serai/AGPL-3.0 .
+COPY --from=builder --chown=processor /serai/bin/serai-processor /bin/
+COPY --from=builder --chown=processor /serai/AGPL-3.0 .

 # Run processor
 CMD ["serai-processor"]
diff --git a/orchestration/processor/monero/Dockerfile b/orchestration/processor/monero/Dockerfile
index 835647f36..63d83bd28 100644
--- a/orchestration/processor/monero/Dockerfile
+++ b/orchestration/processor/monero/Dockerfile
@@ -65,8 +65,8 @@ USER processor
 WORKDIR /home/processor

 # Copy the Processor binary and relevant license
-COPY --from=builder --chown=processsor /serai/bin/serai-processor /bin/
-COPY --from=builder --chown=processsor /serai/AGPL-3.0 .
+COPY --from=builder --chown=processor /serai/bin/serai-processor /bin/
+COPY --from=builder --chown=processor /serai/AGPL-3.0 .

 # Run processor
 CMD ["serai-processor"]
diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs
index 2983fc235..f9f29790d 100644
--- a/processor/messages/src/lib.rs
+++ b/processor/messages/src/lib.rs
@@ -306,7 +306,7 @@ impl_from!(substrate, ProcessorMessage, Substrate);

 // Intent generation code
 const COORDINATOR_UID: u8 = 0;
-const PROCESSSOR_UID: u8 = 1;
+const PROCESSOR_UID: u8 = 1;

 const TYPE_KEY_GEN_UID: u8 = 2;
 const TYPE_SIGN_UID: u8 = 3;
@@ -401,7 +401,7 @@ impl ProcessorMessage {
           key_gen::ProcessorMessage::Blame { id, .. } => (5, id),
         };

-        let mut res = vec![PROCESSSOR_UID, TYPE_KEY_GEN_UID, sub];
+        let mut res = vec![PROCESSOR_UID, TYPE_KEY_GEN_UID, sub];
         res.extend(&id.encode());
         res
       }
@@ -415,7 +415,7 @@ impl ProcessorMessage {
           sign::ProcessorMessage::Completed { id, .. } => (3, id.to_vec()),
         };

-        let mut res = vec![PROCESSSOR_UID, TYPE_SIGN_UID, sub];
+        let mut res = vec![PROCESSOR_UID, TYPE_SIGN_UID, sub];
         res.extend(&id);
         res
       }
@@ -430,7 +430,7 @@ impl ProcessorMessage {
           coordinator::ProcessorMessage::CosignedBlock { block, .. } => (5, block.encode()),
         };

-        let mut res = vec![PROCESSSOR_UID, TYPE_COORDINATOR_UID, sub];
+        let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub];
         res.extend(&id);
         res
       }
@@ -443,7 +443,7 @@ impl ProcessorMessage {
           }
         };

-        let mut res = vec![PROCESSSOR_UID, TYPE_SUBSTRATE_UID, sub];
+        let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub];
         res.extend(&id);
         res
       }
diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs
index 8185eb4fa..f1a5b47c0 100644
--- a/processor/src/key_gen.rs
+++ b/processor/src/key_gen.rs
@@ -572,7 +572,7 @@ impl KeyGen {
         .unwrap()
         .blame(accuser, accused, network_share, network_blame);

-      // If thw accused was blamed for either, mark them as at fault
+      // If the accused was blamed for either, mark them as at fault
       if (substrate_blame == accused) || (network_blame == accused) {
         return ProcessorMessage::Blame { id, participant: accused };
       }
diff --git a/processor/src/multisigs/scanner.rs b/processor/src/multisigs/scanner.rs
index 1a13ba3d2..cefa8a255 100644
--- a/processor/src/multisigs/scanner.rs
+++ b/processor/src/multisigs/scanner.rs
@@ -547,7 +547,7 @@ impl Scanner {

       let key_vec = key.to_bytes().as_ref().to_vec();

-      // TODO: These lines are the ones which will cause a really long-lived lock acquisiton
+      // TODO: These lines are the ones which will cause a really long-lived lock acquisition
       for output in network.get_outputs(&block, key).await {
         assert_eq!(output.key(), key);
         if output.balance().amount.0 >= N::DUST {
diff --git a/processor/src/multisigs/scheduler.rs b/processor/src/multisigs/scheduler.rs
index cd1795852..abc81a80b 100644
--- a/processor/src/multisigs/scheduler.rs
+++ b/processor/src/multisigs/scheduler.rs
@@ -18,7 +18,7 @@ pub struct Scheduler {
   key: ::G,
   coin: Coin,

-  // Serai, when it has more outputs expected than it can handle in a single tranaction, will
+  // Serai, when it has more outputs expected than it can handle in a single transaction, will
   // schedule the outputs to be handled later. Immediately, it just creates additional outputs
   // which will eventually handle those outputs
   //
@@ -321,7 +321,7 @@ impl Scheduler {

     // If we don't have UTXOs available, don't try to continue
     if self.utxos.is_empty() {
-      log::info!("no utxos currently avilable");
+      log::info!("no utxos currently available");
       return plans;
     }

diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs
index a04c5d1e0..b47f0f033 100644
--- a/processor/src/networks/bitcoin.rs
+++ b/processor/src/networks/bitcoin.rs
@@ -507,14 +507,14 @@ impl Network for Bitcoin {
     // The output should be ~36 bytes, or 144 weight units
     // The overhead should be ~20 bytes at most, or 80 weight units
     // 684 weight units, 171 vbytes, round up to 200
-    // 200 vbytes at 1 sat/weight (our current minumum fee, 4 sat/vbyte) = 800 sat fee for the
+    // 200 vbytes at 1 sat/weight (our current minimum fee, 4 sat/vbyte) = 800 sat fee for the
     // aggregation TX
     const COST_TO_AGGREGATE: u64 = 800;

     // Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT)
     // A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes
     // While our inputs are entirely SegWit, such fine tuning is not necessary and could create
-    // issues in the future (if the size decreases or we mis-evaluate it)
+    // issues in the future (if the size decreases or we misevaluate it)
     // It also offers a minimal amount of benefit when we are able to logarithmically accumulate
     // inputs
     // For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and
diff --git a/processor/src/plan.rs b/processor/src/plan.rs
index b25d50be0..3e10c7d39 100644
--- a/processor/src/plan.rs
+++ b/processor/src/plan.rs
@@ -77,7 +77,7 @@ impl Payment {
 pub struct Plan {
   pub key: ::G,
   pub inputs: Vec,
-  /// The payments this Plan is inteded to create.
+  /// The payments this Plan is intended to create.
   ///
   /// This should only contain payments leaving Serai. While it is acceptable for users to enter
   /// Serai's address(es) as the payment address, as that'll be handled by anything which expects
@@ -152,7 +152,7 @@ impl Plan {
     let change = if let Some(change) = &self.change {
       change.clone().try_into().map_err(|_| {
         io::Error::other(format!(
-          "an address we said to use as change couldn't be convered to a Vec: {}",
+          "an address we said to use as change couldn't be converted to a Vec: {}",
           change.to_string(),
         ))
       })?
diff --git a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs
index ee616b7c5..ef5b572b2 100644
--- a/processor/src/tests/scanner.rs
+++ b/processor/src/tests/scanner.rs
@@ -142,7 +142,7 @@ pub async fn test_no_deadlock_in_multisig_completed(network: N) {
     }
   };

-  // The ack_block acquisiton shows the Scanner isn't maintaining the lock on its own thread after
+  // The ack_block acquisition shows the Scanner isn't maintaining the lock on its own thread after
   // emitting the Block event
   // TODO: This is incomplete. Also test after emitting Completed
   let mut txn = db.txn();
diff --git a/substrate/primitives/src/networks.rs b/substrate/primitives/src/networks.rs
index 94ea6a7a1..fd713ca1b 100644
--- a/substrate/primitives/src/networks.rs
+++ b/substrate/primitives/src/networks.rs
@@ -108,7 +108,7 @@ impl Coin {
 // more liquidity, the only reason we'd have so many coins from a network is if there's no DEX
 // on-chain
 // There's probably no chain with so many *worthwhile* coins and no on-chain DEX
-// This could probably be just 4, yet 8 is a hedge for the unforseen
+// This could probably be just 4, yet 8 is a hedge for the unforeseen
 // If necessary, this can be increased with a fork
 pub const MAX_COINS_PER_NETWORK: u32 = 8;

diff --git a/substrate/signals/pallet/src/lib.rs b/substrate/signals/pallet/src/lib.rs
index 575bc6479..3fad27c92 100644
--- a/substrate/signals/pallet/src/lib.rs
+++ b/substrate/signals/pallet/src/lib.rs
@@ -57,7 +57,7 @@ pub mod pallet {
   pub struct RegisteredRetirementSignal {
     in_favor_of: [u8; 32],
     registrant: T::AccountId,
-    registed_at: BlockNumberFor,
+    registered_at: BlockNumberFor,
   }

   impl RegisteredRetirementSignal {
@@ -135,10 +135,10 @@ pub mod pallet {
     RetirementSignalLockedIn,
     RetirementSignalAlreadyRegistered,
     NotRetirementSignalRegistrant,
-    NonExistantRetirementSignal,
+    NonExistentRetirementSignal,
     ExpiredRetirementSignal,
     NotValidator,
-    RevokingNonExistantFavor,
+    RevokingNonExistentFavor,
   }

   // 80% threshold
@@ -236,7 +236,7 @@ pub mod pallet {
       for_network: NetworkId,
     ) -> DispatchResult {
       if !Favors::::contains_key((signal_id, for_network), account) {
-        Err::<(), _>(Error::::RevokingNonExistantFavor)?;
+        Err::<(), _>(Error::::RevokingNonExistentFavor)?;
       }
       Favors::::remove((signal_id, for_network), account);
       Self::deposit_event(Event::::FavorRevoked { signal_id, by: account, for_network });
@@ -275,7 +275,7 @@ pub mod pallet {
       let signal = RegisteredRetirementSignal {
         in_favor_of,
         registrant: account,
-        registed_at: frame_system::Pallet::::block_number(),
+        registered_at: frame_system::Pallet::::block_number(),
       };
       let signal_id = signal.id();

@@ -301,7 +301,7 @@ pub mod pallet {
       let account = ensure_signed(origin)?;
       let Some(registered_signal) = RegisteredRetirementSignals::::get(retirement_signal_id) else {
-        return Err::<(), _>(Error::::NonExistantRetirementSignal.into());
+        return Err::<(), _>(Error::::NonExistentRetirementSignal.into());
       };
       if account != registered_signal.registrant {
         Err::<(), _>(Error::::NotRetirementSignalRegistrant)?;
       }
@@ -341,7 +341,7 @@ pub mod pallet {
         // We don't have to do this for a `Halt` signal as `Halt` doesn't have the registration
         // process
         let Some(registered_signal) = RegisteredRetirementSignals::::get(signal_id) else {
-          return Err::<(), _>(Error::::NonExistantRetirementSignal.into());
+          return Err::<(), _>(Error::::NonExistentRetirementSignal.into());
         };

         // Check the signal isn't out of date
@@ -350,7 +350,7 @@ pub mod pallet {
         // The reason to still have it is because locking in a dated runtime may cause a corrupt
         // blockchain and lead to a failure in system integrity
         // `Halt`, which doesn't have this check, at worst causes temporary downtime
-        if (registered_signal.registed_at + T::RetirementValidityDuration::get().into()) <
+        if (registered_signal.registered_at + T::RetirementValidityDuration::get().into()) <
           frame_system::Pallet::::block_number()
         {
           Err::<(), _>(Error::::ExpiredRetirementSignal)?;
@@ -448,7 +448,7 @@ pub mod pallet {
       // Check this Signal exists (which would've been implied by Favors for it existing)
       if let SignalId::Retirement(signal_id) = signal_id {
         if RegisteredRetirementSignals::::get(signal_id).is_none() {
-          Err::<(), _>(Error::::NonExistantRetirementSignal)?;
+          Err::<(), _>(Error::::NonExistentRetirementSignal)?;
         }
       }
     }
diff --git a/tests/processor/src/tests/key_gen.rs b/tests/processor/src/tests/key_gen.rs
index b98ec04eb..d50c12b79 100644
--- a/tests/processor/src/tests/key_gen.rs
+++ b/tests/processor/src/tests/key_gen.rs
@@ -115,7 +115,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair {
     .await;

   // Confirm the key pair
-  // TODO: Beter document network_latest_finalized_block's genesis state, and error if a set claims
+  // TODO: Better document network_latest_finalized_block's genesis state, and error if a set claims
   // [0; 32] was finalized
   let context = SubstrateContext {
     serai_time: SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(),
diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs
index 905b32db9..8685af047 100644
--- a/tests/processor/src/tests/send.rs
+++ b/tests/processor/src/tests/send.rs
@@ -272,7 +272,7 @@ fn send_test() {
     for (i, coordinator) in coordinators.iter_mut().enumerate() {
       if !participating.contains(&i) {
         coordinator.publish_transacton(&ops, &tx).await;
-        // Tell them of it as a completion of the relevant signing nodess
+        // Tell them of it as a completion of the relevant signing nodes
         coordinator
           .send_message(messages::sign::CoordinatorMessage::Completed {
             session: Session(0),
@@ -297,8 +297,8 @@ fn send_test() {
     }

     // TODO: Test the Eventuality from the blockchain, instead of from the coordinator
-    // TODO: Test what happenns when Completed is sent with a non-existent TX ID
-    // TODO: Test what happenns when Completed is sent with a non-completing TX ID
+    // TODO: Test what happens when Completed is sent with a non-existent TX ID
+    // TODO: Test what happens when Completed is sent with a non-completing TX ID
   });
 }