From 227f611ad2971b83b1c2220fe66a714ac30cca97 Mon Sep 17 00:00:00 2001 From: Mehul Mathur Date: Sat, 5 Oct 2024 19:28:32 +0530 Subject: [PATCH] chore: enforce existing clippy fixed lints check (#223) # Rationale for this change This change enforces the fixed lint groups checks in proof-of-sql lib. # What changes are included in this PR? Denied the existing lints fixed & fixed the new warnings on updated main. # Are these changes tested? Yes. --- Cargo.toml | 14 ++++++- crates/proof-of-sql-parser/src/identifier.rs | 5 +-- .../src/intermediate_ast_tests.rs | 2 +- .../src/intermediate_decimal.rs | 11 ++++-- .../src/posql_time/timestamp.rs | 10 ++--- .../src/posql_time/timezone.rs | 6 +-- .../src/posql_time/unit.rs | 6 +-- .../proof-of-sql/benches/bench_append_rows.rs | 5 ++- crates/proof-of-sql/benches/jaeger_benches.rs | 6 +-- crates/proof-of-sql/benches/scaffold/mod.rs | 8 ++-- .../proof-of-sql/examples/hello_world/main.rs | 4 +- .../proof-of-sql/examples/posql_db/README.md | 2 +- .../examples/posql_db/commit_accessor.rs | 4 +- .../examples/posql_db/csv_accessor.rs | 2 +- crates/proof-of-sql/examples/posql_db/main.rs | 2 +- .../src/base/bit/bit_matrix_test.rs | 4 +- .../src/base/commitment/column_bounds.rs | 8 +--- .../column_commitment_metadata_map.rs | 1 + .../src/base/commitment/committable_column.rs | 2 +- .../src/base/commitment/naive_commitment.rs | 3 +- .../base/commitment/naive_commitment_test.rs | 3 ++ .../src/base/commitment/query_commitments.rs | 3 ++ .../src/base/commitment/table_commitment.rs | 3 +- .../base/commitment/test_evaluation_proof.rs | 2 +- .../arrow_array_to_column_conversion.rs | 4 +- .../proof-of-sql/src/base/database/column.rs | 15 ++++--- .../src/base/database/owned_column.rs | 8 ++-- .../database/owned_table_test_accessor.rs | 2 +- .../src/base/database/owned_table_utility.rs | 15 ++++--- .../src/base/database/record_batch_utility.rs | 2 +- .../base/database/test_accessor_utility.rs | 22 +++++------ .../src/base/encode/scalar_varint.rs | 
8 ++-- crates/proof-of-sql/src/base/math/log.rs | 4 +- .../base/polynomial/composite_polynomial.rs | 2 +- .../src/base/polynomial/interpolate_test.rs | 2 +- .../base/polynomial/multilinear_extension.rs | 4 +- .../src/base/proof/keccak256_transcript.rs | 6 +-- .../src/base/proof/merlin_transcript_core.rs | 6 +-- .../proof-of-sql/src/base/proof/transcript.rs | 2 +- .../src/base/slice_ops/mul_add_assign_test.rs | 8 ++-- .../src/base/slice_ops/slice_cast_test.rs | 10 ++--- .../dory/dory_inner_product.rs | 6 +-- .../src/proof_primitive/dory/dory_reduce.rs | 4 +- .../dory/dynamic_dory_structure.rs | 13 ++----- .../proof_primitive/dory/extended_state.rs | 2 +- .../src/proof_primitive/dory/rand_util.rs | 1 + .../proof_primitive/dory/scalar_product.rs | 2 +- .../src/proof_primitive/dory/state.rs | 2 +- .../proof_primitive/dory/vmv_state_test.rs | 2 +- .../proof_primitive/sumcheck/proof_test.rs | 6 +-- .../src/sql/parse/query_expr_tests.rs | 8 ++-- .../src/sql/postprocessing/test_utility.rs | 4 ++ .../src/sql/proof/provable_query_result.rs | 1 + .../sql/proof/provable_query_result_test.rs | 1 + .../src/sql/proof/query_proof_test.rs | 2 +- .../src/sql/proof/query_result.rs | 1 + .../sql/proof/verifiable_query_result_test.rs | 2 +- .../verifiable_query_result_test_utility.rs | 2 +- .../sql/proof_exprs/add_subtract_expr_test.rs | 2 +- .../src/sql/proof_exprs/and_expr_test.rs | 2 +- .../src/sql/proof_exprs/equals_expr_test.rs | 2 +- .../sql/proof_exprs/inequality_expr_test.rs | 4 +- .../src/sql/proof_exprs/literal_expr_test.rs | 2 +- .../src/sql/proof_exprs/multiply_expr_test.rs | 2 +- .../src/sql/proof_exprs/not_expr_test.rs | 2 +- .../src/sql/proof_exprs/or_expr_test.rs | 2 +- .../filter_exec_test_dishonest_prover.rs | 2 +- .../src/sql/proof_plans/group_by_exec_test.rs | 4 +- .../proof-of-sql/src/tests/sol_test_util.rs | 4 +- .../tests/decimal_integration_tests.rs | 16 ++++---- .../proof-of-sql/tests/integration_tests.rs | 6 +-- .../tests/timestamp_integration_tests.rs | 39 
++++++++++++------- 72 files changed, 208 insertions(+), 176 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7cf60d375..be07592ff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,4 +64,16 @@ zerocopy = { version = "0.7.34" } missing_docs = "warn" [workspace.lints.clippy] -missing_panics_doc = "warn" +missing_panics_doc = "deny" +doc_markdown = "deny" +match_same_arms = "deny" +similar_names = "deny" +many_single_char_names = "deny" +explicit_iter_loop = "deny" +implicit_clone = "deny" +uninlined_format_args = "deny" +semicolon_if_nothing_returned = "deny" +unnested_or_patterns = "deny" +unreadable_literal = "deny" +must_use_candidate = "deny" +range_plus_one = "deny" \ No newline at end of file diff --git a/crates/proof-of-sql-parser/src/identifier.rs b/crates/proof-of-sql-parser/src/identifier.rs index 584c54cbf..d5cae3125 100644 --- a/crates/proof-of-sql-parser/src/identifier.rs +++ b/crates/proof-of-sql-parser/src/identifier.rs @@ -195,11 +195,10 @@ mod tests { "to_timestamp", ]; - for keyword in keywords.iter() { + for keyword in &keywords { assert!( Identifier::from_str(keyword).is_err(), - "Should not parse keyword as identifier: {}", - keyword + "Should not parse keyword as identifier: {keyword}" ); } } diff --git a/crates/proof-of-sql-parser/src/intermediate_ast_tests.rs b/crates/proof-of-sql-parser/src/intermediate_ast_tests.rs index 61dd38aa4..b23e12258 100644 --- a/crates/proof-of-sql-parser/src/intermediate_ast_tests.rs +++ b/crates/proof-of-sql-parser/src/intermediate_ast_tests.rs @@ -1108,7 +1108,7 @@ fn we_cannot_parse_queries_with_long_identifiers() { } //////////////////////////////// -/// Tests for the GroupByClause +/// Tests for the `GroupByClause` //////////////////////////////// #[test] fn we_can_parse_a_simple_group_by_clause() { diff --git a/crates/proof-of-sql-parser/src/intermediate_decimal.rs b/crates/proof-of-sql-parser/src/intermediate_decimal.rs index 1cfa7e1c9..dd28f594d 100644 --- 
a/crates/proof-of-sql-parser/src/intermediate_decimal.rs +++ b/crates/proof-of-sql-parser/src/intermediate_decimal.rs @@ -210,7 +210,7 @@ mod tests { }; assert_eq!( i128::try_from(valid_decimal), - Ok(170141183460469231731687303715884105727i128) + Ok(170_141_183_460_469_231_731_687_303_715_884_105_727_i128) ); let valid_decimal = IntermediateDecimal { @@ -228,7 +228,7 @@ mod tests { }; assert_eq!( i128::try_from(valid_decimal_negative), - Ok(-170141183460469231731687303715884105728i128) + Ok(-170_141_183_460_469_231_731_687_303_715_884_105_728_i128) ); let non_integer = IntermediateDecimal { @@ -242,7 +242,10 @@ mod tests { let valid_decimal = IntermediateDecimal { value: BigDecimal::from_str("9223372036854775807").unwrap(), }; - assert_eq!(i64::try_from(valid_decimal), Ok(9223372036854775807i64)); + assert_eq!( + i64::try_from(valid_decimal), + Ok(9_223_372_036_854_775_807_i64) + ); let valid_decimal = IntermediateDecimal { value: BigDecimal::from_str("123.000").unwrap(), @@ -259,7 +262,7 @@ mod tests { }; assert_eq!( i64::try_from(valid_decimal_negative), - Ok(-9223372036854775808i64) + Ok(-9_223_372_036_854_775_808_i64) ); let non_integer = IntermediateDecimal { diff --git a/crates/proof-of-sql-parser/src/posql_time/timestamp.rs b/crates/proof-of-sql-parser/src/posql_time/timestamp.rs index f036bac14..d54fd28b0 100644 --- a/crates/proof-of-sql-parser/src/posql_time/timestamp.rs +++ b/crates/proof-of-sql-parser/src/posql_time/timestamp.rs @@ -147,7 +147,7 @@ mod tests { #[test] fn test_unix_epoch_time_timezone() { - let unix_time = 1231006505; // Unix time as string + let unix_time = 1_231_006_505; // Unix time as string let expected_timezone = PoSQLTimeZone::Utc; // Unix time should always be UTC let result = PoSQLTimestamp::to_timestamp(unix_time).unwrap(); assert_eq!(result.timezone, expected_timezone); @@ -155,7 +155,7 @@ mod tests { #[test] fn test_unix_epoch_timestamp_parsing() { - let unix_time = 1231006505; // Example Unix timestamp (seconds since epoch) 
+ let unix_time = 1_231_006_505; // Example Unix timestamp (seconds since epoch) let expected_datetime = Utc.timestamp_opt(unix_time, 0).unwrap(); let expected_unit = PoSQLTimeUnit::Second; // Assuming basic second precision for Unix timestamp let input = unix_time; // Simulate input as string since Unix times are often transmitted as strings @@ -235,8 +235,7 @@ mod tests { for input in inputs { assert!( DateTime::parse_from_rfc3339(input).is_ok(), - "Should parse correctly: {}", - input + "Should parse correctly: {input}" ); } } @@ -286,8 +285,7 @@ mod tests { for input in incorrect_formats { assert!( DateTime::parse_from_rfc3339(input).is_err(), - "Should reject incorrect format: {}", - input + "Should reject incorrect format: {input}" ); } } diff --git a/crates/proof-of-sql-parser/src/posql_time/timezone.rs b/crates/proof-of-sql-parser/src/posql_time/timezone.rs index c4f775afd..39cdfbae9 100644 --- a/crates/proof-of-sql-parser/src/posql_time/timezone.rs +++ b/crates/proof-of-sql-parser/src/posql_time/timezone.rs @@ -83,19 +83,19 @@ mod timezone_parsing_tests { #[test] fn test_display_fixed_offset_positive() { let timezone = timezone::PoSQLTimeZone::FixedOffset(4500); // +01:15 - assert_eq!(format!("{}", timezone), "+01:15"); + assert_eq!(format!("{timezone}"), "+01:15"); } #[test] fn test_display_fixed_offset_negative() { let timezone = timezone::PoSQLTimeZone::FixedOffset(-3780); // -01:03 - assert_eq!(format!("{}", timezone), "-01:03"); + assert_eq!(format!("{timezone}"), "-01:03"); } #[test] fn test_display_utc() { let timezone = timezone::PoSQLTimeZone::Utc; - assert_eq!(format!("{}", timezone), "+00:00"); + assert_eq!(format!("{timezone}"), "+00:00"); } } diff --git a/crates/proof-of-sql-parser/src/posql_time/unit.rs b/crates/proof-of-sql-parser/src/posql_time/unit.rs index 26503e369..215fa7959 100644 --- a/crates/proof-of-sql-parser/src/posql_time/unit.rs +++ b/crates/proof-of-sql-parser/src/posql_time/unit.rs @@ -64,7 +64,7 @@ mod time_unit_tests { let 
invalid_precisions = [ "1", "2", "4", "5", "7", "8", "10", "zero", "three", "cat", "-1", "-2", ]; // Testing all your various invalid inputs - for &value in invalid_precisions.iter() { + for &value in &invalid_precisions { let result = PoSQLTimeUnit::try_from(value); assert!(matches!( result, @@ -88,7 +88,7 @@ mod time_unit_tests { #[test] fn test_rfc3339_timestamp_with_microseconds() { let input = "2023-06-26T12:34:56.123456Z"; - let expected = Utc.ymd(2023, 6, 26).and_hms_micro(12, 34, 56, 123456); + let expected = Utc.ymd(2023, 6, 26).and_hms_micro(12, 34, 56, 123_456); let result = PoSQLTimestamp::try_from(input).unwrap(); assert_eq!(result.timeunit(), PoSQLTimeUnit::Microsecond); assert_eq!( @@ -99,7 +99,7 @@ mod time_unit_tests { #[test] fn test_rfc3339_timestamp_with_nanoseconds() { let input = "2023-06-26T12:34:56.123456789Z"; - let expected = Utc.ymd(2023, 6, 26).and_hms_nano(12, 34, 56, 123456789); + let expected = Utc.ymd(2023, 6, 26).and_hms_nano(12, 34, 56, 123_456_789); let result = PoSQLTimestamp::try_from(input).unwrap(); assert_eq!(result.timeunit(), PoSQLTimeUnit::Nanosecond); assert_eq!( diff --git a/crates/proof-of-sql/benches/bench_append_rows.rs b/crates/proof-of-sql/benches/bench_append_rows.rs index 72eef7913..023c04cff 100644 --- a/crates/proof-of-sql/benches/bench_append_rows.rs +++ b/crates/proof-of-sql/benches/bench_append_rows.rs @@ -29,7 +29,7 @@ use rand::Rng; use std::ops::Deref; /// Bench dory performance when appending rows to a table. This includes the computation of -/// commitments. Chose the number of columns to randomly generate across supported PoSQL +/// commitments. Chose the number of columns to randomly generate across supported `PoSQL` /// data types, and choose the number of rows to append at a time. 
/// /// ```text @@ -72,7 +72,8 @@ fn bench_append_rows(c: &mut Criterion, cols: usize, rows: usize) { }); } -/// Generates a random OwnedTable with a specified number of columns +/// Generates a random [`OwnedTable`] with a specified number of columns +#[must_use] pub fn generate_random_owned_table( num_columns: usize, num_rows: usize, diff --git a/crates/proof-of-sql/benches/jaeger_benches.rs b/crates/proof-of-sql/benches/jaeger_benches.rs index 00090098c..04d35e7bd 100644 --- a/crates/proof-of-sql/benches/jaeger_benches.rs +++ b/crates/proof-of-sql/benches/jaeger_benches.rs @@ -5,7 +5,7 @@ //! cargo bench -p proof-of-sql --bench jaeger_benches InnerProductProof //! cargo bench -p proof-of-sql --bench jaeger_benches Dory --features="test" //! ``` -//! Then, navigate to http://localhost:16686 to view the traces. +//! Then, navigate to <http://localhost:16686> to view the traces. #[cfg(feature = "test")] use ark_std::test_rng; @@ -45,7 +45,7 @@ fn main() { "InnerProductProof" => { // Run 3 times to ensure that warm-up of the GPU has occurred. 
for _ in 0..3 { - for (title, query, columns) in QUERIES.iter() { + for (title, query, columns) in QUERIES { jaeger_scaffold::(title, query, columns, SIZE, &(), &()); } } @@ -60,7 +60,7 @@ fn main() { let verifier_setup = DoryVerifierPublicSetup::new(&vs, 10); for _ in 0..3 { - for (title, query, columns) in QUERIES.iter() { + for (title, query, columns) in QUERIES { jaeger_scaffold::( title, query, diff --git a/crates/proof-of-sql/benches/scaffold/mod.rs b/crates/proof-of-sql/benches/scaffold/mod.rs index 4a3e665b5..c82b212bd 100644 --- a/crates/proof-of-sql/benches/scaffold/mod.rs +++ b/crates/proof-of-sql/benches/scaffold/mod.rs @@ -83,7 +83,7 @@ pub fn criterion_scaffold( prover_setup: &CP::ProverPublicSetup<'_>, verifier_setup: &CP::VerifierPublicSetup<'_>, ) { - let mut group = c.benchmark_group(format!("{} - {}", title, query)); + let mut group = c.benchmark_group(format!("{title} - {query}")); group.sample_size(10); group.plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)); init_backend(); @@ -102,10 +102,12 @@ pub fn criterion_scaffold( &mut rng, ); group.bench_function("Generate Proof", |b| { - b.iter(|| VerifiableQueryResult::::new(query.proof_expr(), &accessor, prover_setup)) + b.iter(|| { + VerifiableQueryResult::::new(query.proof_expr(), &accessor, prover_setup) + }); }); group.bench_function("Verify Proof", |b| { - b.iter(|| result.verify(query.proof_expr(), &accessor, verifier_setup)) + b.iter(|| result.verify(query.proof_expr(), &accessor, verifier_setup)); }); } } diff --git a/crates/proof-of-sql/examples/hello_world/main.rs b/crates/proof-of-sql/examples/hello_world/main.rs index 00fd22876..4e5607151 100644 --- a/crates/proof-of-sql/examples/hello_world/main.rs +++ b/crates/proof-of-sql/examples/hello_world/main.rs @@ -14,7 +14,7 @@ use std::{ /// /// Will panic if flushing the output fails, which can happen due to issues with the underlying output stream. 
fn start_timer(message: &str) -> Instant { - print!("{}...", message); + print!("{message}..."); stdout().flush().unwrap(); Instant::now() } @@ -70,7 +70,7 @@ fn main() { println!("Query result: {:?}", result.table); } Err(e) => { - println!("Error: {:?}", e); + println!("Error: {e:?}"); } } } diff --git a/crates/proof-of-sql/examples/posql_db/README.md b/crates/proof-of-sql/examples/posql_db/README.md index fc8d858b5..d64b81071 100644 --- a/crates/proof-of-sql/examples/posql_db/README.md +++ b/crates/proof-of-sql/examples/posql_db/README.md @@ -1,4 +1,4 @@ -# posql_db +# `posql_db` Example demonstrating an implementation of a simple csv-backed database with Proof of SQL capabilities. diff --git a/crates/proof-of-sql/examples/posql_db/commit_accessor.rs b/crates/proof-of-sql/examples/posql_db/commit_accessor.rs index 61989ede3..cd3fa5d95 100644 --- a/crates/proof-of-sql/examples/posql_db/commit_accessor.rs +++ b/crates/proof-of-sql/examples/posql_db/commit_accessor.rs @@ -21,12 +21,12 @@ impl Deserialize<'a>> CommitAccessor { table_ref: &TableRef, commit: &TableCommitment, ) -> Result<(), Box> { - let path = self.base_path.join(format!("{}.commit", table_ref)); + let path = self.base_path.join(format!("{table_ref}.commit")); fs::write(path, postcard::to_allocvec(commit)?)?; Ok(()) } pub fn load_commit(&mut self, table_ref: TableRef) -> Result<(), Box> { - let path = self.base_path.join(format!("{}.commit", table_ref)); + let path = self.base_path.join(format!("{table_ref}.commit")); let commit = postcard::from_bytes(&fs::read(path)?)?; self.inner.insert(table_ref, commit); Ok(()) diff --git a/crates/proof-of-sql/examples/posql_db/csv_accessor.rs b/crates/proof-of-sql/examples/posql_db/csv_accessor.rs index cfce85ca0..2dbbfb64b 100644 --- a/crates/proof-of-sql/examples/posql_db/csv_accessor.rs +++ b/crates/proof-of-sql/examples/posql_db/csv_accessor.rs @@ -58,7 +58,7 @@ impl CsvDataAccessor { Ok(()) } fn get_table_path(&self, table_ref: &TableRef) -> PathBuf { - 
self.base_path.join(format!("{}.csv", table_ref)) + self.base_path.join(format!("{table_ref}.csv")) } pub fn write_table( &self, diff --git a/crates/proof-of-sql/examples/posql_db/main.rs b/crates/proof-of-sql/examples/posql_db/main.rs index 668e66f25..9ec7f2bc0 100644 --- a/crates/proof-of-sql/examples/posql_db/main.rs +++ b/crates/proof-of-sql/examples/posql_db/main.rs @@ -120,7 +120,7 @@ enum Commands { /// Will panic if the call to `stdout().flush()` fails, indicating that the /// standard output stream could not be flushed fn start_timer(message: &str) -> Instant { - print!("{}...", message); + print!("{message}..."); stdout().flush().unwrap(); Instant::now() } diff --git a/crates/proof-of-sql/src/base/bit/bit_matrix_test.rs b/crates/proof-of-sql/src/base/bit/bit_matrix_test.rs index e56a8d48f..59dda03dd 100644 --- a/crates/proof-of-sql/src/base/bit/bit_matrix_test.rs +++ b/crates/proof-of-sql/src/base/bit/bit_matrix_test.rs @@ -29,7 +29,7 @@ fn we_can_compute_the_bit_matrix_for_data_with_a_single_varying_bit() { let matrix = compute_varying_bit_matrix(&alloc, &data, &dist); assert_eq!(matrix.len(), 1); let slice1 = vec![true, false]; - assert_eq!(matrix[0], slice1) + assert_eq!(matrix[0], slice1); } #[test] @@ -40,7 +40,7 @@ fn we_can_compute_the_bit_matrix_for_data_with_a_varying_sign_bit() { let matrix = compute_varying_bit_matrix(&alloc, &data, &dist); assert_eq!(matrix.len(), 1); let slice1 = vec![false, true]; - assert_eq!(matrix[0], slice1) + assert_eq!(matrix[0], slice1); } #[test] diff --git a/crates/proof-of-sql/src/base/commitment/column_bounds.rs b/crates/proof-of-sql/src/base/commitment/column_bounds.rs index 971577585..c4fcdbabe 100644 --- a/crates/proof-of-sql/src/base/commitment/column_bounds.rs +++ b/crates/proof-of-sql/src/base/commitment/column_bounds.rs @@ -625,15 +625,11 @@ mod tests { for ((bound_a, name_a), (bound_b, name_b)) in bounds.iter().tuple_combinations() { assert!( bound_a.try_union(*bound_b).is_err(), - "Expected error when 
trying to union {} with {}", - name_a, - name_b + "Expected error when trying to union {name_a} with {name_b}" ); assert!( bound_b.try_union(*bound_a).is_err(), - "Expected error when trying to union {} with {}", - name_b, - name_a + "Expected error when trying to union {name_b} with {name_a}" ); } } diff --git a/crates/proof-of-sql/src/base/commitment/column_commitment_metadata_map.rs b/crates/proof-of-sql/src/base/commitment/column_commitment_metadata_map.rs index 4a72620ad..5b43a98ed 100644 --- a/crates/proof-of-sql/src/base/commitment/column_commitment_metadata_map.rs +++ b/crates/proof-of-sql/src/base/commitment/column_commitment_metadata_map.rs @@ -326,6 +326,7 @@ mod tests { )); } + #[allow(clippy::similar_names)] #[test] fn we_cannot_perform_arithmetic_on_mismatched_metadata_maps_with_same_column_counts() { let id_a = "column_a"; diff --git a/crates/proof-of-sql/src/base/commitment/committable_column.rs b/crates/proof-of-sql/src/base/commitment/committable_column.rs index 957dac7fa..79a087789 100644 --- a/crates/proof-of-sql/src/base/commitment/committable_column.rs +++ b/crates/proof-of-sql/src/base/commitment/committable_column.rs @@ -239,7 +239,7 @@ mod tests { .into(), ); - assert_eq!(res_committable_column, test_committable_column) + assert_eq!(res_committable_column, test_committable_column); } #[test] diff --git a/crates/proof-of-sql/src/base/commitment/naive_commitment.rs b/crates/proof-of-sql/src/base/commitment/naive_commitment.rs index 4891e5b60..a2fc78480 100644 --- a/crates/proof-of-sql/src/base/commitment/naive_commitment.rs +++ b/crates/proof-of-sql/src/base/commitment/naive_commitment.rs @@ -44,7 +44,7 @@ impl Neg for NaiveCommitment { impl SubAssign for NaiveCommitment { fn sub_assign(&mut self, rhs: Self) { - self.add_assign(rhs.neg()) + self.add_assign(rhs.neg()); } } @@ -154,6 +154,7 @@ impl Commitment for NaiveCommitment { } } +#[allow(clippy::similar_names)] #[test] fn we_can_compute_commitments_from_commitable_columns() { let column_a 
= [1i64, 10, -5, 0, 10]; diff --git a/crates/proof-of-sql/src/base/commitment/naive_commitment_test.rs b/crates/proof-of-sql/src/base/commitment/naive_commitment_test.rs index c46087084..40dac3858 100644 --- a/crates/proof-of-sql/src/base/commitment/naive_commitment_test.rs +++ b/crates/proof-of-sql/src/base/commitment/naive_commitment_test.rs @@ -143,6 +143,7 @@ fn we_can_subtract_naive_commitments_with_both_empty() { // AddAssign Tests +#[allow(clippy::similar_names)] #[test] fn we_can_add_assign_naive_commitments() { let column_a: Vec = [1i64, 10, -5, 0, 10].iter().map(|bi| bi.into()).collect(); @@ -165,6 +166,7 @@ fn we_can_add_assign_naive_commitments() { assert_eq!(commitment_b_mutable, commitment_sum); } +#[allow(clippy::similar_names)] #[test] fn we_can_add_assign_naive_commitments_with_one_empty() { let column_a: Vec = [1i64, 10, -5, 0, 10].iter().map(|bi| bi.into()).collect(); @@ -220,6 +222,7 @@ fn we_can_sub_assign_naive_commitments() { assert_eq!(commitment_a_mutable, commitment_difference); } +#[allow(clippy::similar_names)] #[test] fn we_can_sub_assign_naive_commitments_with_one_empty() { let column_a: Vec = [1i64, 10, -5, 0, 10].iter().map(|bi| bi.into()).collect(); diff --git a/crates/proof-of-sql/src/base/commitment/query_commitments.rs b/crates/proof-of-sql/src/base/commitment/query_commitments.rs index ce4ffd1c3..75df048ba 100644 --- a/crates/proof-of-sql/src/base/commitment/query_commitments.rs +++ b/crates/proof-of-sql/src/base/commitment/query_commitments.rs @@ -200,6 +200,7 @@ mod tests { assert_eq!(query_commitments.get_length(no_rows_id), 0); } + #[allow(clippy::similar_names)] #[test] fn we_can_get_commitment_of_a_column() { let column_a_id: Identifier = "column_a".parse().unwrap(); @@ -249,6 +250,7 @@ mod tests { ); } + #[allow(clippy::similar_names)] #[test] fn we_can_get_schema_of_tables() { let column_a_id: Identifier = "column_a".parse().unwrap(); @@ -323,6 +325,7 @@ mod tests { 
assert_eq!(query_commitments.lookup_schema(no_columns_id), vec![]); } + #[allow(clippy::similar_names)] #[test] fn we_can_get_query_commitments_from_accessor() { let public_parameters = PublicParameters::test_rand(4, &mut test_rng()); diff --git a/crates/proof-of-sql/src/base/commitment/table_commitment.rs b/crates/proof-of-sql/src/base/commitment/table_commitment.rs index 61f463323..0f9e21783 100644 --- a/crates/proof-of-sql/src/base/commitment/table_commitment.rs +++ b/crates/proof-of-sql/src/base/commitment/table_commitment.rs @@ -752,7 +752,7 @@ mod tests { table_commitment_clone .append_owned_table(&append_columns, &()) .unwrap(); - assert_eq!(table_commitment, table_commitment_clone) + assert_eq!(table_commitment, table_commitment_clone); } #[test] @@ -822,6 +822,7 @@ mod tests { assert_eq!(table_commitment.column_commitments(), &column_commitments); } + #[allow(clippy::similar_names)] #[test] fn we_cannot_append_columns_of_mixed_length_to_table_commitment() { let column_id_a: Identifier = "column_a".parse().unwrap(); diff --git a/crates/proof-of-sql/src/base/commitment/test_evaluation_proof.rs b/crates/proof-of-sql/src/base/commitment/test_evaluation_proof.rs index e9422783f..1663ae83a 100644 --- a/crates/proof-of-sql/src/base/commitment/test_evaluation_proof.rs +++ b/crates/proof-of-sql/src/base/commitment/test_evaluation_proof.rs @@ -6,7 +6,7 @@ pub struct TestEvaluationProof {} /// This should only be used for the purpose of unit testing. /// For now it is only being created for the purpose of implementing -/// CommitmentEvaluationProof for TestEvaluationProof. +/// [`CommitmentEvaluationProof`] for [`TestEvaluationProof`]. 
pub enum TestErrorType {} impl CommitmentEvaluationProof for TestEvaluationProof { diff --git a/crates/proof-of-sql/src/base/database/arrow_array_to_column_conversion.rs b/crates/proof-of-sql/src/base/database/arrow_array_to_column_conversion.rs index c343cbd04..a3e5e1fd2 100644 --- a/crates/proof-of-sql/src/base/database/arrow_array_to_column_conversion.rs +++ b/crates/proof-of-sql/src/base/database/arrow_array_to_column_conversion.rs @@ -57,7 +57,7 @@ pub enum ArrowArrayToColumnConversionError { /// This trait is used to provide utility functions to convert [`ArrayRef`]s into proof types (Column, Scalars, etc.) pub trait ArrayRefExt { - /// Convert an ArrayRef into a Proof of SQL Vec + /// Convert an [`ArrayRef`] into a Proof of SQL Vec /// /// Note: this function must not be called from unsupported arrays or arrays with nulls. /// It should only be used during testing. @@ -537,7 +537,7 @@ mod tests { let array: ArrayRef = Arc::new(builder.finish().with_precision_and_scale(76, 0).unwrap()); let result = array.to_column::(&alloc, &(1..3), None); - assert!(result.is_err()) + assert!(result.is_err()); } #[test] diff --git a/crates/proof-of-sql/src/base/database/column.rs b/crates/proof-of-sql/src/base/database/column.rs index b65447e5c..efe46ed06 100644 --- a/crates/proof-of-sql/src/base/database/column.rs +++ b/crates/proof-of-sql/src/base/database/column.rs @@ -177,8 +177,7 @@ impl<'a, S: Scalar> Column<'a, S> { Self::Int(col) => alloc.alloc_slice_fill_with(col.len(), |i| S::from(col[i])), Self::BigInt(col) => alloc.alloc_slice_fill_with(col.len(), |i| S::from(col[i])), Self::Int128(col) => alloc.alloc_slice_fill_with(col.len(), |i| S::from(col[i])), - Self::Scalar(col) => col, - Self::Decimal75(_, _, col) => col, + Self::Scalar(col) | Self::Decimal75(_, _, col) => col, Self::VarChar((_, scals)) => scals, Self::TimestampTZ(_, _, col) => { alloc.alloc_slice_fill_with(col.len(), |i| S::from(col[i])) @@ -835,7 +834,7 @@ mod tests { let precision = 10; let scale = 2; 
- let scals = [ + let scalar_values = [ Curve25519Scalar::from(1), Curve25519Scalar::from(2), Curve25519Scalar::from(3), @@ -858,7 +857,7 @@ mod tests { assert_eq!(column.len(), 3); assert!(!column.is_empty()); - let column = Column::VarChar((&["a", "b", "c"], &scals)); + let column = Column::VarChar((&["a", "b", "c"], &scalar_values)); assert_eq!(column.len(), 3); assert!(!column.is_empty()); @@ -866,7 +865,7 @@ mod tests { assert_eq!(column.len(), 3); assert!(!column.is_empty()); - let column = Column::Scalar(&scals); + let column = Column::Scalar(&scalar_values); assert_eq!(column.len(), 3); assert!(!column.is_empty()); @@ -985,17 +984,17 @@ mod tests { assert_eq!(column.column_type().byte_size(), 16); assert_eq!(column.column_type().bit_size(), 128); - let scals = [ + let scalar_values = [ Curve25519Scalar::from(1), Curve25519Scalar::from(2), Curve25519Scalar::from(3), ]; - let column = Column::VarChar((&["a", "b", "c", "d", "e"], &scals)); + let column = Column::VarChar((&["a", "b", "c", "d", "e"], &scalar_values)); assert_eq!(column.column_type().byte_size(), 32); assert_eq!(column.column_type().bit_size(), 256); - let column = Column::Scalar(&scals); + let column = Column::Scalar(&scalar_values); assert_eq!(column.column_type().byte_size(), 32); assert_eq!(column.column_type().bit_size(), 256); diff --git a/crates/proof-of-sql/src/base/database/owned_column.rs b/crates/proof-of-sql/src/base/database/owned_column.rs index 67d154b0f..0316ab552 100644 --- a/crates/proof-of-sql/src/base/database/owned_column.rs +++ b/crates/proof-of-sql/src/base/database/owned_column.rs @@ -237,8 +237,7 @@ impl OwnedColumn { /// assuming the underlying type is [i64], panicking if it is not. 
pub fn i64_iter(&self) -> impl Iterator { match self { - OwnedColumn::BigInt(col) => col.iter(), - OwnedColumn::TimestampTZ(_, _, col) => col.iter(), + OwnedColumn::TimestampTZ(_, _, col) | OwnedColumn::BigInt(col) => col.iter(), _ => panic!("Expected TimestampTZ or BigInt column"), } } @@ -265,8 +264,7 @@ impl OwnedColumn { /// assuming the underlying type is a [Scalar], panicking if it is not. pub fn scalar_iter(&self) -> impl Iterator { match self { - OwnedColumn::Scalar(col) => col.iter(), - OwnedColumn::Decimal75(_, _, col) => col.iter(), + OwnedColumn::Decimal75(_, _, col) | OwnedColumn::Scalar(col) => col.iter(), _ => panic!("Expected Scalar or Decimal75 column"), } } @@ -397,7 +395,7 @@ mod test { assert_eq!( compare_indexes_by_owned_columns_with_direction(&order_by_pairs, 1, 4), Ordering::Less - ) + ); } #[test] diff --git a/crates/proof-of-sql/src/base/database/owned_table_test_accessor.rs b/crates/proof-of-sql/src/base/database/owned_table_test_accessor.rs index 94752130b..5cf61c85a 100644 --- a/crates/proof-of-sql/src/base/database/owned_table_test_accessor.rs +++ b/crates/proof-of-sql/src/base/database/owned_table_test_accessor.rs @@ -9,7 +9,7 @@ use crate::base::{ use bumpalo::Bump; use proof_of_sql_parser::Identifier; -/// A test accessor that uses OwnedTable as the underlying table type. +/// A test accessor that uses [`OwnedTable`] as the underlying table type. /// Note: this is not optimized for performance, so should not be used for benchmarks. 
pub struct OwnedTableTestAccessor<'a, CP: CommitmentEvaluationProof> { tables: IndexMap, usize)>, diff --git a/crates/proof-of-sql/src/base/database/owned_table_utility.rs b/crates/proof-of-sql/src/base/database/owned_table_utility.rs index a0d3188ae..658dfb565 100644 --- a/crates/proof-of-sql/src/base/database/owned_table_utility.rs +++ b/crates/proof-of-sql/src/base/database/owned_table_utility.rs @@ -51,11 +51,12 @@ pub fn owned_table( /// Creates a `(Identifier, OwnedColumn)` pair for a smallint column. /// This is primarily intended for use in conjunction with [`owned_table`]. /// # Example -/// ```use proof_of_sql::base::{database::owned_table_utility::*, scalar::Curve25519Scalar}; +/// ```rust +/// use proof_of_sql::base::{database::owned_table_utility::*, scalar::Curve25519Scalar}; /// let result = owned_table::([ /// smallint("a", [1_i16, 2, 3]), /// ]); -///``` +/// ``` /// # Panics /// - Panics if `name.parse()` fails to convert the name into an `Identifier`. pub fn smallint( @@ -71,11 +72,12 @@ pub fn smallint( /// Creates a `(Identifier, OwnedColumn)` pair for an int column. /// This is primarily intended for use in conjunction with [`owned_table`]. /// # Example -/// ```use proof_of_sql::base::{database::owned_table_utility::*, scalar::Curve25519Scalar}; +/// ```rust +/// use proof_of_sql::base::{database::owned_table_utility::*, scalar::Curve25519Scalar}; /// let result = owned_table::([ /// int("a", [1, 2, 3]), /// ]); -///``` +/// ``` /// # Panics /// - Panics if `name.parse()` fails to convert the name into an `Identifier`. pub fn int( @@ -91,11 +93,12 @@ pub fn int( /// Creates a `(Identifier, OwnedColumn)` pair for a bigint column. /// This is primarily intended for use in conjunction with [`owned_table`]. 
/// # Example -/// ``` use proof_of_sql::base::{database::owned_table_utility::*, scalar::Curve25519Scalar}; +/// ```rust +/// use proof_of_sql::base::{database::owned_table_utility::*, scalar::Curve25519Scalar}; /// let result = owned_table::([ /// bigint("a", [1, 2, 3]), /// ]); -///]``` +/// ``` #[allow(clippy::missing_panics_doc)] pub fn bigint( name: impl Deref, diff --git a/crates/proof-of-sql/src/base/database/record_batch_utility.rs b/crates/proof-of-sql/src/base/database/record_batch_utility.rs index ae4df097d..d1180005b 100644 --- a/crates/proof-of-sql/src/base/database/record_batch_utility.rs +++ b/crates/proof-of-sql/src/base/database/record_batch_utility.rs @@ -118,7 +118,7 @@ impl ToArrow for Vec { /// /// # Panics /// - /// Will panic if the conversion to a Decimal128Array fails, which can happen if the data exceeds the specified precision and scale (38, 0). Ensure that all values are within the valid range for the Decimal128 type. + /// Will panic if the conversion to a [`Decimal128Array`](arrow::array::Decimal128Array) fails, which can happen if the data exceeds the specified precision and scale (38, 0). Ensure that all values are within the valid range for the Decimal128 type. 
fn to_array(self) -> Arc { Arc::new( arrow::array::Decimal128Array::from(self) diff --git a/crates/proof-of-sql/src/base/database/test_accessor_utility.rs b/crates/proof-of-sql/src/base/database/test_accessor_utility.rs index bac58bd9c..815532fc2 100644 --- a/crates/proof-of-sql/src/base/database/test_accessor_utility.rs +++ b/crates/proof-of-sql/src/base/database/test_accessor_utility.rs @@ -15,11 +15,11 @@ use rand::{ }; use std::sync::Arc; -/// Specify what form a randomly generated TestAccessor can take +/// Specify what form a randomly generated `TestAccessor` can take pub struct RandomTestAccessorDescriptor { - /// The minimum number of rows in the generated RecordBatch + /// The minimum number of rows in the generated `RecordBatch` pub min_rows: usize, - /// The maximum number of rows in the generated RecordBatch + /// The maximum number of rows in the generated `RecordBatch` pub max_rows: usize, /// The minimum value of the generated data pub min_value: i64, @@ -38,7 +38,7 @@ impl Default for RandomTestAccessorDescriptor { } } -/// Generate a DataFrame with random data +/// Generate a `DataFrame` with random data /// /// # Panics /// @@ -88,7 +88,7 @@ pub fn make_random_test_accessor_data( } ColumnType::BigInt => { column_fields.push(Field::new(*col_name, DataType::Int64, false)); - let values: Vec = values.to_vec(); + let values: Vec = values.clone(); columns.push(Arc::new(Int64Array::from(values))); } ColumnType::Int128 => { @@ -96,7 +96,7 @@ pub fn make_random_test_accessor_data( let values: Vec = values.iter().map(|x| *x as i128).collect(); columns.push(Arc::new( - Decimal128Array::from(values.to_vec()) + Decimal128Array::from(values.clone()) .with_precision_and_scale(38, 0) .unwrap(), )); @@ -110,7 +110,7 @@ pub fn make_random_test_accessor_data( let values: Vec = values.iter().map(|x| i256::from(*x)).collect(); columns.push(Arc::new( - Decimal256Array::from(values.to_vec()) + Decimal256Array::from(values.clone()) 
.with_precision_and_scale(precision.value(), *scale) .unwrap(), )); @@ -143,15 +143,15 @@ pub fn make_random_test_accessor_data( )); // Create the correct timestamp array based on the time unit let timestamp_array: Arc = match tu { - PoSQLTimeUnit::Second => Arc::new(TimestampSecondArray::from(values.to_vec())), + PoSQLTimeUnit::Second => Arc::new(TimestampSecondArray::from(values.clone())), PoSQLTimeUnit::Millisecond => { - Arc::new(TimestampMillisecondArray::from(values.to_vec())) + Arc::new(TimestampMillisecondArray::from(values.clone())) } PoSQLTimeUnit::Microsecond => { - Arc::new(TimestampMicrosecondArray::from(values.to_vec())) + Arc::new(TimestampMicrosecondArray::from(values.clone())) } PoSQLTimeUnit::Nanosecond => { - Arc::new(TimestampNanosecondArray::from(values.to_vec())) + Arc::new(TimestampNanosecondArray::from(values.clone())) } }; columns.push(timestamp_array); diff --git a/crates/proof-of-sql/src/base/encode/scalar_varint.rs b/crates/proof-of-sql/src/base/encode/scalar_varint.rs index cc11690ff..d904590b8 100644 --- a/crates/proof-of-sql/src/base/encode/scalar_varint.rs +++ b/crates/proof-of-sql/src/base/encode/scalar_varint.rs @@ -103,7 +103,7 @@ pub fn read_u256_varint(buf: &[u8]) -> Option<(U256, usize)> { } /// This function writes all the input scalars `scals` to the input buffer `buf`. -/// For that, the Varint together with the ZigZag encoding is used. +/// For that, the Varint together with the [`ZigZag`] encoding is used. 
/// /// return: /// - the total number of bytes written to buf @@ -114,7 +114,7 @@ pub fn read_u256_varint(buf: &[u8]) -> Option<(U256, usize)> { pub fn write_scalar_varints>(buf: &mut [u8], scals: &[MontScalar]) -> usize { let mut total_bytes_written = 0; - for scal in scals.iter() { + for scal in scals { let bytes_written = write_scalar_varint(&mut buf[total_bytes_written..], scal); total_bytes_written += bytes_written; @@ -124,7 +124,7 @@ pub fn write_scalar_varints>(buf: &mut [u8], scals: &[MontScala } /// This function read all the specified scalars from `input_buf` to `scals_buf`. -/// For that, it converts the input buffer from a Varint and ZigZag encoding to a Dalek Scalar +/// For that, it converts the input buffer from a Varint and [`ZigZag`] encoding to a Dalek Scalar /// /// See `` as reference. /// @@ -174,7 +174,7 @@ pub fn u256_varint_size(zig_x: U256) -> usize { pub fn scalar_varints_size>(scals: &[MontScalar]) -> usize { let mut all_size: usize = 0; - for x in scals.iter() { + for x in scals { all_size += scalar_varint_size(x); } diff --git a/crates/proof-of-sql/src/base/math/log.rs b/crates/proof-of-sql/src/base/math/log.rs index b18d3305e..6fa5c2b43 100644 --- a/crates/proof-of-sql/src/base/math/log.rs +++ b/crates/proof-of-sql/src/base/math/log.rs @@ -29,7 +29,7 @@ pub fn is_pow2_bytes(data: &[u8; N]) -> bool { } } -/// Calculate the floored log_2 of the (unsigned) bytes data. +/// Calculate the floored `log_2` of the (unsigned) bytes data. /// /// The first byte in the array should represent the smallest digit. /// If the data is 0, returns 0 instead of panicking. @@ -44,7 +44,7 @@ pub fn log2_down_bytes(data: &[u8; N]) -> usize { } } -/// Calculate the ceiled log_2 of the (unsigned) bytes data. +/// Calculate the ceiled `log_2` of the (unsigned) bytes data. /// /// The first byte in the array should represent the smallest digit. /// If the data is 0, returns 0 instead of panicking. 
diff --git a/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs b/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs index 9f39a132d..9ac0eed85 100644 --- a/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs +++ b/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs @@ -112,7 +112,7 @@ impl CompositePolynomial { } #[cfg(test)] - /// Returns the product of the flattened_ml_extensions with referenced (as usize) by `terms` at the index `i`. + /// Returns the product of the `flattened_ml_extensions` with referenced (as usize) by `terms` at the index `i`. fn term_product(&self, terms: &[usize], i: usize) -> S { terms .iter() diff --git a/crates/proof-of-sql/src/base/polynomial/interpolate_test.rs b/crates/proof-of-sql/src/base/polynomial/interpolate_test.rs index 50ae9627c..ec40d3c57 100644 --- a/crates/proof-of-sql/src/base/polynomial/interpolate_test.rs +++ b/crates/proof-of-sql/src/base/polynomial/interpolate_test.rs @@ -1,4 +1,4 @@ -/** +/* * Adopted from arkworks * * See third_party/license/arkworks.LICENSE diff --git a/crates/proof-of-sql/src/base/polynomial/multilinear_extension.rs b/crates/proof-of-sql/src/base/polynomial/multilinear_extension.rs index 01675f7a8..bce68ffb7 100644 --- a/crates/proof-of-sql/src/base/polynomial/multilinear_extension.rs +++ b/crates/proof-of-sql/src/base/polynomial/multilinear_extension.rs @@ -114,7 +114,7 @@ impl MultilinearExtension for &Column<'_, S> { match self { Column::Boolean(c) => c.mul_add(res, multiplier), Column::Scalar(c) | Column::VarChar((_, c)) | Column::Decimal75(_, _, c) => { - c.mul_add(res, multiplier) + c.mul_add(res, multiplier); } Column::SmallInt(c) => c.mul_add(res, multiplier), Column::Int(c) => c.mul_add(res, multiplier), @@ -156,7 +156,7 @@ impl MultilinearExtension for Column<'_, S> { } fn mul_add(&self, res: &mut [S], multiplier: &S) { - (&self).mul_add(res, multiplier) + (&self).mul_add(res, multiplier); } fn to_sumcheck_term(&self, num_vars: usize) -> 
Rc> { diff --git a/crates/proof-of-sql/src/base/proof/keccak256_transcript.rs b/crates/proof-of-sql/src/base/proof/keccak256_transcript.rs index 89b5b4824..a9f0287f0 100644 --- a/crates/proof-of-sql/src/base/proof/keccak256_transcript.rs +++ b/crates/proof-of-sql/src/base/proof/keccak256_transcript.rs @@ -42,14 +42,14 @@ mod tests { use super::{super::transcript_core::test_util::*, Keccak256Transcript}; #[test] fn we_get_equivalent_challenges_with_equivalent_keccak256_transcripts() { - we_get_equivalent_challenges_with_equivalent_transcripts::() + we_get_equivalent_challenges_with_equivalent_transcripts::(); } #[test] fn we_get_different_challenges_with_different_keccak256_transcripts() { - we_get_different_challenges_with_different_transcripts::() + we_get_different_challenges_with_different_transcripts::(); } #[test] fn we_get_different_nontrivial_consecutive_challenges_from_keccak256_transcript() { - we_get_different_nontrivial_consecutive_challenges_from_transcript::() + we_get_different_nontrivial_consecutive_challenges_from_transcript::(); } } diff --git a/crates/proof-of-sql/src/base/proof/merlin_transcript_core.rs b/crates/proof-of-sql/src/base/proof/merlin_transcript_core.rs index b2ff61597..2dd8bdeac 100644 --- a/crates/proof-of-sql/src/base/proof/merlin_transcript_core.rs +++ b/crates/proof-of-sql/src/base/proof/merlin_transcript_core.rs @@ -17,14 +17,14 @@ mod tests { use super::super::transcript_core::test_util::*; #[test] fn we_get_equivalent_challenges_with_equivalent_merlin_transcripts() { - we_get_equivalent_challenges_with_equivalent_transcripts::() + we_get_equivalent_challenges_with_equivalent_transcripts::(); } #[test] fn we_get_different_challenges_with_different_keccak256_transcripts() { - we_get_different_challenges_with_different_transcripts::() + we_get_different_challenges_with_different_transcripts::(); } #[test] fn we_get_different_nontrivial_consecutive_challenges_from_keccak256_transcript() { - 
we_get_different_nontrivial_consecutive_challenges_from_transcript::() + we_get_different_nontrivial_consecutive_challenges_from_transcript::(); } } diff --git a/crates/proof-of-sql/src/base/proof/transcript.rs b/crates/proof-of-sql/src/base/proof/transcript.rs index c50ba74da..82f23318c 100644 --- a/crates/proof-of-sql/src/base/proof/transcript.rs +++ b/crates/proof-of-sql/src/base/proof/transcript.rs @@ -28,7 +28,7 @@ pub trait Transcript { /// Request a challenge. Returns the raw, unreversed, bytes. (i.e. littleendian form) fn challenge_as_le(&mut self) -> [u8; 32]; - /// Appends a type that implements [serde::Serialize] by appending the raw bytes (i.e. assuming the message is littleendian) + /// Appends a type that implements [`serde::Serialize`] by appending the raw bytes (i.e. assuming the message is littleendian) /// /// # Panics /// - Panics if `postcard::to_allocvec(message)` fails to serialize the message. diff --git a/crates/proof-of-sql/src/base/slice_ops/mul_add_assign_test.rs b/crates/proof-of-sql/src/base/slice_ops/mul_add_assign_test.rs index 9035743eb..8c164f0f9 100644 --- a/crates/proof-of-sql/src/base/slice_ops/mul_add_assign_test.rs +++ b/crates/proof-of-sql/src/base/slice_ops/mul_add_assign_test.rs @@ -10,7 +10,7 @@ fn test_mul_add_assign() { assert_eq!(a, c); } -/// test mul_add_assign with uneven vectors +/// test [`mul_add_assign`] with uneven vectors #[test] fn test_mul_add_assign_uneven() { let mut a = vec![1, 2, 3, 4, 5]; @@ -20,7 +20,7 @@ fn test_mul_add_assign_uneven() { assert_eq!(a, c); } -/// test mul_add_assign with with uneven panics when len(a) < len(b) +/// test [`mul_add_assign`] with with uneven panics when len(a) < len(b) #[test] #[should_panic] fn test_mul_add_assign_uneven_panic() { @@ -29,7 +29,7 @@ fn test_mul_add_assign_uneven_panic() { mul_add_assign(&mut a, 10, &b); } -/// test mul_add_assign with curve25519scalar +/// test [`mul_add_assign`] with curve25519scalar #[test] fn test_mul_add_assign_curve25519scalar() { let 
mut a = vec![Curve25519Scalar::from(1u64), Curve25519Scalar::from(2u64)]; @@ -42,7 +42,7 @@ fn test_mul_add_assign_curve25519scalar() { assert_eq!(a, c); } -/// test mul_add_assign with uneven curve25519scalars +/// test [`mul_add_assign`] with uneven curve25519scalars #[test] fn test_mul_add_assign_curve25519scalar_uneven() { let mut a = vec![ diff --git a/crates/proof-of-sql/src/base/slice_ops/slice_cast_test.rs b/crates/proof-of-sql/src/base/slice_ops/slice_cast_test.rs index 90b8fe1e1..ec0496dc4 100644 --- a/crates/proof-of-sql/src/base/slice_ops/slice_cast_test.rs +++ b/crates/proof-of-sql/src/base/slice_ops/slice_cast_test.rs @@ -10,7 +10,7 @@ fn test_slice_map_to_vec() { assert_eq!(a, b); } -/// add tests for slice_cast_with +/// add tests for [`slice_cast_with`] #[test] fn test_slice_cast_with_from_curve25519_scalar_to_dalek_scalar() { let a: Vec = vec![Curve25519Scalar::from(1u64), Curve25519Scalar::from(2u64)]; @@ -19,7 +19,7 @@ fn test_slice_cast_with_from_curve25519_scalar_to_dalek_scalar() { assert_eq!(a, b); } -/// add tests for slice_cast +/// add tests for [`slice_cast`] #[test] fn test_slice_cast_from_curve25519_scalar_to_dalek_scalar() { let a: Vec = vec![Curve25519Scalar::from(1u64), Curve25519Scalar::from(2u64)]; @@ -28,7 +28,7 @@ fn test_slice_cast_from_curve25519_scalar_to_dalek_scalar() { assert_eq!(a, b); } -/// random test for slice_cast_with +/// random test for [`slice_cast_with`] #[test] fn test_slice_cast_with_random() { use rand::Rng; @@ -70,7 +70,7 @@ fn test_slice_cast_mut() { assert_eq!(b, slice_cast_with(&a, |&x| x as u64)); } -/// random test for slice_cast_mut_with +/// random test for [`slice_cast_mut_with`] #[test] fn test_slice_cast_mut_with_random() { use rand::Rng; @@ -81,7 +81,7 @@ fn test_slice_cast_mut_with_random() { assert_eq!(b, slice_cast_with(&a, |&x| x as u64)); } -/// random test for slice_cast_mut_with +/// random test for [`slice_cast_mut_with`] #[test] fn test_slice_cast_mut_random() { use rand::Rng; diff --git 
a/crates/proof-of-sql/src/proof_primitive/dory/dory_inner_product.rs b/crates/proof-of-sql/src/proof_primitive/dory/dory_inner_product.rs index ce7b9e3e6..fd385ef00 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/dory_inner_product.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/dory_inner_product.rs @@ -4,7 +4,7 @@ use super::{ }; use crate::base::proof::Transcript; -/// This is the prover side of the Dory-Innerproduct algorithm in section 3.3 of https://eprint.iacr.org/2020/1274.pdf. +/// This is the prover side of the Dory-Innerproduct algorithm in section 3.3 of . /// This function builds/enqueues `messages`, appends to `transcript`, and consumes `state`. #[cfg(test)] pub fn dory_inner_product_prove( @@ -18,10 +18,10 @@ pub fn dory_inner_product_prove( for _ in 0..nu { dory_reduce_prove(messages, transcript, &mut state, setup); } - scalar_product_prove(messages, transcript, state) + scalar_product_prove(messages, transcript, state); } -/// This is the verifier side of the Dory-Innerproduct algorithm in section 3.3 of https://eprint.iacr.org/2020/1274.pdf. +/// This is the verifier side of the Dory-Innerproduct algorithm in section 3.3 of . /// This function consumes/dequeues from `messages`, appends to `transcript`, and consumes `state`. #[cfg(test)] pub fn dory_inner_product_verify( diff --git a/crates/proof-of-sql/src/proof_primitive/dory/dory_reduce.rs b/crates/proof-of-sql/src/proof_primitive/dory/dory_reduce.rs index b5e5155b7..60e0fa225 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/dory_reduce.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/dory_reduce.rs @@ -3,7 +3,7 @@ use super::{ }; use crate::base::proof::Transcript; -/// This is the prover side of the Dory-Reduce algorithm in section 3.2 of https://eprint.iacr.org/2020/1274.pdf. +/// This is the prover side of the Dory-Reduce algorithm in section 3.2 of . 
#[cfg(test)] pub fn dory_reduce_prove( messages: &mut DoryMessages, @@ -28,7 +28,7 @@ pub fn dory_reduce_prove( state.nu -= 1; } -/// This is the verifier side of the Dory-Reduce algorithm in section 3.2 of https://eprint.iacr.org/2020/1274.pdf. +/// This is the verifier side of the Dory-Reduce algorithm in section 3.2 of . #[cfg(test)] pub fn dory_reduce_verify( messages: &mut DoryMessages, diff --git a/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_structure.rs b/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_structure.rs index eaeec0579..2f4146060 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_structure.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_structure.rs @@ -152,7 +152,7 @@ mod tests { _ => row[0] .expect("Every row except 1 should have a value in the 0th position."), } - ) + ); } } } @@ -178,8 +178,7 @@ mod tests { let width_of_row = full_width_of_row(row); assert_eq!( width_of_row, *width, - "row: {} does not produce expected width", - row + "row: {row} does not produce expected width" ); } } @@ -266,16 +265,12 @@ mod tests { if valid_pairs.contains(&(row, column)) { assert!( index.is_some(), - "Valid pair ({}, {}) generated no index", - row, - column + "Valid pair ({row}, {column}) generated no index" ); } else { assert!( index.is_none(), - "Invalid pair ({}, {}) generated a valid index", - row, - column + "Invalid pair ({row}, {column}) generated a valid index" ); } } diff --git a/crates/proof-of-sql/src/proof_primitive/dory/extended_state.rs b/crates/proof-of-sql/src/proof_primitive/dory/extended_state.rs index 3851e77e3..60389029b 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/extended_state.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/extended_state.rs @@ -69,7 +69,7 @@ impl ExtendedProverState { } /// Calculate the verifier state from the prover state and setup information. /// This is basically the commitment computation of the witness. 
- /// See the beginning of section 4 of https://eprint.iacr.org/2020/1274.pdf for details. + /// See the beginning of section 4 of for details. #[cfg(test)] pub fn calculate_verifier_state(&self, setup: &ProverSetup) -> ExtendedVerifierState { let E_1: G1Affine = G1Projective::msm_unchecked(&self.base_state.v1, &self.s2).into(); diff --git a/crates/proof-of-sql/src/proof_primitive/dory/rand_util.rs b/crates/proof-of-sql/src/proof_primitive/dory/rand_util.rs index 0c74c1aa2..911e194aa 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/rand_util.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/rand_util.rs @@ -8,6 +8,7 @@ use ark_std::{ #[cfg(test)] /// Create a random number generator for testing. +#[must_use] pub fn test_rng() -> impl Rng { ark_std::test_rng() } diff --git a/crates/proof-of-sql/src/proof_primitive/dory/scalar_product.rs b/crates/proof-of-sql/src/proof_primitive/dory/scalar_product.rs index e8f2534f6..583781120 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/scalar_product.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/scalar_product.rs @@ -2,7 +2,7 @@ use super::{pairings, DoryMessages, ProverState, VerifierSetup, VerifierState}; use crate::base::proof::Transcript; -/// This is the prover side of the Scalar-Product algorithm in section 3.1 of https://eprint.iacr.org/2020/1274.pdf. +/// This is the prover side of the Scalar-Product algorithm in section 3.1 of . #[allow(clippy::missing_panics_doc)] pub fn scalar_product_prove( messages: &mut DoryMessages, diff --git a/crates/proof-of-sql/src/proof_primitive/dory/state.rs b/crates/proof-of-sql/src/proof_primitive/dory/state.rs index 633db8c77..7b651c1dc 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/state.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/state.rs @@ -29,7 +29,7 @@ impl ProverState { } /// Calculate the verifier state from the prover state and setup information. /// This is basically the commitment computation of the witness. 
- /// See the beginning of section 3 of https://eprint.iacr.org/2020/1274.pdf for details. + /// See the beginning of section 3 of for details. #[cfg(test)] pub fn calculate_verifier_state(&self, setup: &ProverSetup) -> VerifierState { assert!(setup.max_nu >= self.nu); diff --git a/crates/proof-of-sql/src/proof_primitive/dory/vmv_state_test.rs b/crates/proof-of-sql/src/proof_primitive/dory/vmv_state_test.rs index 499fa866d..1631827f9 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/vmv_state_test.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/vmv_state_test.rs @@ -108,7 +108,7 @@ fn we_can_create_vmv_states_from_random_vmv_and_get_correct_sizes() { assert_eq!(vmv.L.len(), 1 << nu); assert_eq!(vmv.R.len(), 1 << nu); assert_eq!(vmv.M.len(), 1 << nu); - for row in vmv.M.iter() { + for row in &vmv.M { assert_eq!(row.len(), 1 << nu); } diff --git a/crates/proof-of-sql/src/proof_primitive/sumcheck/proof_test.rs b/crates/proof-of-sql/src/proof_primitive/sumcheck/proof_test.rs index 218cf4cfb..8f1977d98 100644 --- a/crates/proof-of-sql/src/proof_primitive/sumcheck/proof_test.rs +++ b/crates/proof-of-sql/src/proof_primitive/sumcheck/proof_test.rs @@ -4,7 +4,7 @@ use crate::base::{ proof::Transcript as _, scalar::{test_scalar::TestScalar, Curve25519Scalar, Scalar}, }; -/** +/* * Adopted from arkworks * * See third_party/license/arkworks.LICENSE @@ -84,7 +84,7 @@ fn random_product( ) -> (Vec>>, Curve25519Scalar) { let mut multiplicands = Vec::with_capacity(num_multiplicands); for _ in 0..num_multiplicands { - multiplicands.push(Vec::with_capacity(1 << nv)) + multiplicands.push(Vec::with_capacity(1 << nv)); } let mut sum = Curve25519Scalar::zero(); @@ -210,7 +210,7 @@ fn we_can_verify_many_random_test_cases() { assert_ne!( subclaim.evaluation_point, evaluation_point, "either verification should fail or we should have a different evaluation point with a different transcript" - ) + ); } let mut transcript = Transcript::new(b"sumchecktest"); diff --git 
a/crates/proof-of-sql/src/sql/parse/query_expr_tests.rs b/crates/proof-of-sql/src/sql/parse/query_expr_tests.rs index 8ddbcb3cb..a798c5b44 100644 --- a/crates/proof-of-sql/src/sql/parse/query_expr_tests.rs +++ b/crates/proof-of-sql/src/sql/parse/query_expr_tests.rs @@ -665,7 +665,7 @@ fn we_can_convert_an_ast_without_any_filter() { } ///////////////////////// -/// OrderBy +/// `OrderBy` ///////////////////////// #[test] fn we_can_parse_order_by_with_a_single_column() { @@ -1908,7 +1908,7 @@ fn select_group_and_order_by_preserve_the_column_order_reference() { let query = query!( select: perm_cols, group: group_cols.clone(), - order: order_cols.clone().zip(ordering.clone()).map(|(c, o)| format!("{} {}", c, o)) + order: order_cols.clone().zip(ordering.clone()).map(|(c, o)| format!("{c} {o}")) ); let expected_query = QueryExpr::new( filter(perm_col_plans, tab(t), const_bool(true)), @@ -1921,7 +1921,7 @@ fn select_group_and_order_by_preserve_the_column_order_reference() { } } -/// Creates a new QueryExpr, with the given select statement and a sample schema accessor. +/// Creates a new [`QueryExpr`], with the given select statement and a sample schema accessor. fn query_expr_for_test_table(sql_text: &str) -> QueryExpr { let schema_accessor = schema_accessor_from_table_ref_with_schema( "test.table".parse().unwrap(), @@ -1936,7 +1936,7 @@ fn query_expr_for_test_table(sql_text: &str) -> QueryExpr { QueryExpr::try_new(select_statement, default_schema, &schema_accessor).unwrap() } -/// Serializes and deserializes QueryExpr with flexbuffers and asserts that it remains the same. +/// Serializes and deserializes [`QueryExpr`] with flexbuffers and asserts that it remains the same. 
fn assert_query_expr_serializes_to_and_from_flex_buffers(query_expr: QueryExpr) { let serialized = flexbuffers::to_vec(&query_expr).unwrap(); let deserialized: QueryExpr = diff --git a/crates/proof-of-sql/src/sql/postprocessing/test_utility.rs b/crates/proof-of-sql/src/sql/postprocessing/test_utility.rs index 71608ec48..dd421d0db 100644 --- a/crates/proof-of-sql/src/sql/postprocessing/test_utility.rs +++ b/crates/proof-of-sql/src/sql/postprocessing/test_utility.rs @@ -5,6 +5,7 @@ use proof_of_sql_parser::{ Identifier, }; +#[must_use] pub fn group_by_postprocessing( cols: &[&str], result_exprs: &[AliasedResultExpr], @@ -19,14 +20,17 @@ pub fn group_by_postprocessing( /// # Panics /// /// This function may panic if the internal structures cannot be created properly, although this is unlikely under normal circumstances. +#[must_use] pub fn select_expr(result_exprs: &[AliasedResultExpr]) -> OwnedTablePostprocessing { OwnedTablePostprocessing::new_select(SelectPostprocessing::new(result_exprs.to_vec())) } +#[must_use] pub fn slice(limit: Option, offset: Option) -> OwnedTablePostprocessing { OwnedTablePostprocessing::new_slice(SlicePostprocessing::new(limit, offset)) } +#[must_use] pub fn orders(cols: &[&str], directions: &[OrderByDirection]) -> OwnedTablePostprocessing { let by_exprs = cols .iter() diff --git a/crates/proof-of-sql/src/sql/proof/provable_query_result.rs b/crates/proof-of-sql/src/sql/proof/provable_query_result.rs index a59008e1e..32bbcc7e1 100644 --- a/crates/proof-of-sql/src/sql/proof/provable_query_result.rs +++ b/crates/proof-of-sql/src/sql/proof/provable_query_result.rs @@ -50,6 +50,7 @@ impl ProvableQueryResult { } /// This function is available to allow for easy creation for testing. 
#[cfg(test)] + #[must_use] pub fn new_from_raw_data(num_columns: u64, indexes: Indexes, data: Vec) -> Self { Self { num_columns, diff --git a/crates/proof-of-sql/src/sql/proof/provable_query_result_test.rs b/crates/proof-of-sql/src/sql/proof/provable_query_result_test.rs index 7a9040fba..352a4fd94 100644 --- a/crates/proof-of-sql/src/sql/proof/provable_query_result_test.rs +++ b/crates/proof-of-sql/src/sql/proof/provable_query_result_test.rs @@ -128,6 +128,7 @@ fn we_can_evaluate_multiple_result_columns_as_mles_with_128_bits() { assert_eq!(evals, expected_evals); } +#[allow(clippy::similar_names)] #[test] fn we_can_evaluate_multiple_result_columns_as_mles_with_scalar_columns() { let indexes = Indexes::Sparse(vec![0, 2]); diff --git a/crates/proof-of-sql/src/sql/proof/query_proof_test.rs b/crates/proof-of-sql/src/sql/proof/query_proof_test.rs index 6460fbb9b..05c0754d6 100644 --- a/crates/proof-of-sql/src/sql/proof/query_proof_test.rs +++ b/crates/proof-of-sql/src/sql/proof/query_proof_test.rs @@ -181,7 +181,7 @@ fn verify_fails_if_counts_dont_match() { } /// prove and verify an artificial query where -/// res_i = x_i * x_i +/// `res_i = x_i * x_i` /// where the commitment for x is known #[derive(Debug, Serialize)] struct SquareTestProofPlan { diff --git a/crates/proof-of-sql/src/sql/proof/query_result.rs b/crates/proof-of-sql/src/sql/proof/query_result.rs index d5e49db7f..cdefda189 100644 --- a/crates/proof-of-sql/src/sql/proof/query_result.rs +++ b/crates/proof-of-sql/src/sql/proof/query_result.rs @@ -59,6 +59,7 @@ pub struct QueryData { impl QueryData { #[cfg(all(test, feature = "arrow"))] + #[must_use] pub fn into_record_batch(self) -> RecordBatch { self.try_into().unwrap() } diff --git a/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test.rs b/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test.rs index c02496504..871103c28 100644 --- a/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test.rs +++ 
b/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test.rs @@ -71,7 +71,7 @@ impl ProofPlan for EmptyTestQueryExpr { _result: Option<&OwnedTable<::Scalar>>, ) -> Result, ProofError> { let _ = std::iter::repeat_with(|| { - assert_eq!(builder.consume_intermediate_mle(), C::Scalar::ZERO) + assert_eq!(builder.consume_intermediate_mle(), C::Scalar::ZERO); }) .take(self.columns) .collect::>(); diff --git a/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs b/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs index a2763477b..956c013dd 100644 --- a/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs +++ b/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs @@ -15,7 +15,7 @@ use curve25519_dalek::{ristretto::RistrettoPoint, traits::Identity}; use num_traits::One; use serde::Serialize; -/// This function takes a valid verifiable_result, copies it, tweaks it, and checks that +/// This function takes a valid `verifiable_result`, copies it, tweaks it, and checks that /// verification fails. /// /// It's useful as a tool for testing proof code. 
diff --git a/crates/proof-of-sql/src/sql/proof_exprs/add_subtract_expr_test.rs b/crates/proof-of-sql/src/sql/proof_exprs/add_subtract_expr_test.rs index c4f5f3327..7ab3a308f 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/add_subtract_expr_test.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/add_subtract_expr_test.rs @@ -287,7 +287,7 @@ fn test_random_tables_with_given_offset(offset: usize) { .multiunzip(); let expected_result = owned_table([varchar("d", expected_d), int128("f", expected_f)]); - assert_eq!(expected_result, res) + assert_eq!(expected_result, res); } } diff --git a/crates/proof-of-sql/src/sql/proof_exprs/and_expr_test.rs b/crates/proof-of-sql/src/sql/proof_exprs/and_expr_test.rs index fa896fb69..c96b9cb8b 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/and_expr_test.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/and_expr_test.rs @@ -131,7 +131,7 @@ fn test_random_tables_with_given_offset(offset: usize) { .multiunzip(); let expected_result = owned_table([bigint("a", expected_a), varchar("d", expected_d)]); - assert_eq!(expected_result, res) + assert_eq!(expected_result, res); } } diff --git a/crates/proof-of-sql/src/sql/proof_exprs/equals_expr_test.rs b/crates/proof-of-sql/src/sql/proof_exprs/equals_expr_test.rs index 3584a5607..ac1426ea3 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/equals_expr_test.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/equals_expr_test.rs @@ -373,7 +373,7 @@ fn test_random_tables_with_given_offset(offset: usize) { .multiunzip(); let expected_result = owned_table([bigint("a", expected_a), varchar("d", expected_d)]); - assert_eq!(expected_result, res) + assert_eq!(expected_result, res); } } diff --git a/crates/proof-of-sql/src/sql/proof_exprs/inequality_expr_test.rs b/crates/proof-of-sql/src/sql/proof_exprs/inequality_expr_test.rs index 4a305c979..a452da701 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/inequality_expr_test.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/inequality_expr_test.rs @@ 
-489,7 +489,7 @@ fn the_sign_can_be_0_or_1_for_a_constant_column_of_zeros() { ); if let DynProofPlan::Filter(filter) = &mut ast { if let DynProofExpr::Inequality(lte) = &mut filter.where_clause { - lte.treat_column_of_zeros_as_negative = true + lte.treat_column_of_zeros_as_negative = true; } } let verifiable_res = VerifiableQueryResult::new(&ast, &accessor, &()); @@ -546,7 +546,7 @@ fn test_random_tables_with_given_offset(offset: usize) { .multiunzip(); let expected_result = owned_table([bigint("a", expected_a), varchar("b", expected_b)]); - assert_eq!(expected_result, res) + assert_eq!(expected_result, res); } } diff --git a/crates/proof-of-sql/src/sql/proof_exprs/literal_expr_test.rs b/crates/proof-of-sql/src/sql/proof_exprs/literal_expr_test.rs index fe102cb6c..854971348 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/literal_expr_test.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/literal_expr_test.rs @@ -69,7 +69,7 @@ fn test_random_tables_with_given_offset(offset: usize) { bigint("c", expected_c), ]); - assert_eq!(expected_result, res) + assert_eq!(expected_result, res); } } diff --git a/crates/proof-of-sql/src/sql/proof_exprs/multiply_expr_test.rs b/crates/proof-of-sql/src/sql/proof_exprs/multiply_expr_test.rs index d76c62098..1a145f10b 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/multiply_expr_test.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/multiply_expr_test.rs @@ -316,7 +316,7 @@ fn test_random_tables_with_given_offset(offset: usize) { .multiunzip(); let expected_result = owned_table([varchar("d", expected_d), int128("f", expected_f)]); - assert_eq!(expected_result, res) + assert_eq!(expected_result, res); } } diff --git a/crates/proof-of-sql/src/sql/proof_exprs/not_expr_test.rs b/crates/proof-of-sql/src/sql/proof_exprs/not_expr_test.rs index eccd2ef20..60b041ea1 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/not_expr_test.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/not_expr_test.rs @@ -93,7 +93,7 @@ fn 
test_random_tables_with_given_offset(offset: usize) { .multiunzip(); let expected_result = owned_table([bigint("a", expected_a), varchar("b", expected_b)]); - assert_eq!(expected_result, res) + assert_eq!(expected_result, res); } } diff --git a/crates/proof-of-sql/src/sql/proof_exprs/or_expr_test.rs b/crates/proof-of-sql/src/sql/proof_exprs/or_expr_test.rs index 36f758ad0..ac79a280e 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/or_expr_test.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/or_expr_test.rs @@ -154,7 +154,7 @@ fn test_random_tables_with_given_offset(offset: usize) { .multiunzip(); let expected_result = owned_table([bigint("a", expected_a), varchar("d", expected_d)]); - assert_eq!(expected_result, res) + assert_eq!(expected_result, res); } } diff --git a/crates/proof-of-sql/src/sql/proof_plans/filter_exec_test_dishonest_prover.rs b/crates/proof-of-sql/src/sql/proof_plans/filter_exec_test_dishonest_prover.rs index 2c496545e..5528d5fd9 100644 --- a/crates/proof-of-sql/src/sql/proof_plans/filter_exec_test_dishonest_prover.rs +++ b/crates/proof-of-sql/src/sql/proof_plans/filter_exec_test_dishonest_prover.rs @@ -117,7 +117,7 @@ fn tamper_column<'a>( alloc: &'a Bump, mut columns: Vec>, ) -> Vec> { - for column in columns.iter_mut() { + for column in &mut columns { if let Column::Scalar(tampered_column) = column { if !tampered_column.is_empty() { let tampered_column = alloc.alloc_slice_copy(tampered_column); diff --git a/crates/proof-of-sql/src/sql/proof_plans/group_by_exec_test.rs b/crates/proof-of-sql/src/sql/proof_plans/group_by_exec_test.rs index 8f60bc9ef..87d7e5fdb 100644 --- a/crates/proof-of-sql/src/sql/proof_plans/group_by_exec_test.rs +++ b/crates/proof-of-sql/src/sql/proof_plans/group_by_exec_test.rs @@ -11,7 +11,7 @@ use crate::{ }, }; -/// select a, sum(c) as sum_c, count(*) as __count__ from sxt.t where b = 99 group by a +/// `select a, sum(c) as sum_c, count(*) as __count__ from sxt.t where b = 99 group by a` #[test] fn 
we_can_prove_a_simple_group_by_with_bigint_columns() { let data = owned_table([ @@ -40,7 +40,7 @@ fn we_can_prove_a_simple_group_by_with_bigint_columns() { assert_eq!(res, expected); } -/// select a, sum(c * 2 + 1) as sum_c, count(*) as __count__ from sxt.t where b = 99 group by a +/// `select a, sum(c * 2 + 1) as sum_c, count(*) as __count__ from sxt.t where b = 99 group by a` #[test] fn we_can_prove_a_group_by_with_bigint_columns() { let data = owned_table([ diff --git a/crates/proof-of-sql/src/tests/sol_test_util.rs b/crates/proof-of-sql/src/tests/sol_test_util.rs index 9ae41b09d..2734696f2 100644 --- a/crates/proof-of-sql/src/tests/sol_test_util.rs +++ b/crates/proof-of-sql/src/tests/sol_test_util.rs @@ -2,7 +2,7 @@ use alloy_sol_types::{private::primitives::Bytes, SolValue}; use snafu::Snafu; use std::{ffi::OsStr, io, process::Command}; -/// Error type returned by [ForgeScript] functions. +/// Error type returned by [`ForgeScript`] functions. #[derive(Debug, Snafu)] pub enum ForgeScriptError<'a> { /// The script failed to run. This is usually because `forge` is not installed. @@ -13,7 +13,7 @@ pub enum ForgeScriptError<'a> { SolidityError { underlying_command: &'a Command }, } -/// [ForgeScript] enables running solidity from within rust. Ultimately this type calls `forge script`. +/// [`ForgeScript`] enables running solidity from within rust. Ultimately this type calls `forge script`. /// As a result, `forge` must be installed. /// See for instructions. 
pub struct ForgeScript { diff --git a/crates/proof-of-sql/tests/decimal_integration_tests.rs b/crates/proof-of-sql/tests/decimal_integration_tests.rs index 20e8146c5..37c2847b3 100644 --- a/crates/proof-of-sql/tests/decimal_integration_tests.rs +++ b/crates/proof-of-sql/tests/decimal_integration_tests.rs @@ -157,8 +157,8 @@ mod decimal_query_tests { "SELECT * FROM table WHERE c = 0.1", 7, 6, - vec![S::from(100000), S::ZERO, S::from(100000)], - vec![S::from(100000), S::from(100000)], + vec![S::from(100_000), S::ZERO, S::from(100_000)], + vec![S::from(100_000), S::from(100_000)], ); } @@ -179,8 +179,8 @@ mod decimal_query_tests { "SELECT * FROM table WHERE c = 123.456;", 6, 3, - vec![S::from(123456), S::ZERO, S::from(123456)], - vec![S::from(123456), S::from(123456)], + vec![S::from(123_456), S::ZERO, S::from(123_456)], + vec![S::from(123_456), S::from(123_456)], ); } @@ -190,8 +190,8 @@ mod decimal_query_tests { "SELECT * FROM table WHERE c = 12345", 7, 2, - vec![S::from(1234500), S::ZERO, S::from(1234500)], - vec![S::from(1234500), S::from(1234500)], + vec![S::from(1_234_500), S::ZERO, S::from(1_234_500)], + vec![S::from(1_234_500), S::from(1_234_500)], ); } @@ -201,8 +201,8 @@ mod decimal_query_tests { "SELECT * FROM table WHERE c = -12345", 7, 2, - vec![-S::from(1234500), S::ZERO, -S::from(1234500)], - vec![-S::from(1234500), -S::from(1234500)], + vec![-S::from(1_234_500), S::ZERO, -S::from(1_234_500)], + vec![-S::from(1_234_500), -S::from(1_234_500)], ); } diff --git a/crates/proof-of-sql/tests/integration_tests.rs b/crates/proof-of-sql/tests/integration_tests.rs index b0c87b7a5..76425c8d1 100644 --- a/crates/proof-of-sql/tests/integration_tests.rs +++ b/crates/proof-of-sql/tests/integration_tests.rs @@ -425,7 +425,7 @@ fn decimal_type_issues_should_cause_provable_ast_to_fail() { 0, ); let large_decimal = format!("0.{}", "1".repeat(75)); - let query_string = format!("SELECT d0 + {} as res FROM table;", large_decimal); + let query_string = format!("SELECT d0 + 
{large_decimal} as res FROM table;"); assert!(matches!( QueryExpr::::try_new( query_string.parse().unwrap(), @@ -472,7 +472,7 @@ fn we_can_prove_a_complex_query_with_curve25519() { bigint("t", [-5]), decimal75("g", 3, 1, [457]), boolean("h", [true]), - decimal75("dr", 26, 6, [1400006]), + decimal75("dr", 26, 6, [1_400_006]), ]); assert_eq!(owned_table_result, expected_result); } @@ -524,7 +524,7 @@ fn we_can_prove_a_complex_query_with_dory() { decimal75("res", 22, 1, [25]), bigint("g", [32]), boolean("h", [true]), - decimal75("res2", 46, 4, [129402]), + decimal75("res2", 46, 4, [129_402]), ]); assert_eq!(owned_table_result, expected_result); } diff --git a/crates/proof-of-sql/tests/timestamp_integration_tests.rs b/crates/proof-of-sql/tests/timestamp_integration_tests.rs index aeb4ea0b1..de8ffb948 100644 --- a/crates/proof-of-sql/tests/timestamp_integration_tests.rs +++ b/crates/proof-of-sql/tests/timestamp_integration_tests.rs @@ -126,8 +126,8 @@ mod tests { #[test] fn test_basic_timestamp_query() { - let test_timestamps = vec![1609459200, 1612137600, 1614556800]; - let expected_timestamps = vec![1609459200]; + let test_timestamps = vec![1_609_459_200, 1_612_137_600, 1_614_556_800]; + let expected_timestamps = vec![1_609_459_200]; run_timestamp_query_test( "SELECT * FROM table WHERE times = timestamp '2021-01-01T00:00:00Z';", @@ -235,8 +235,8 @@ mod tests { // and the gateway. 
#[test] fn test_timestamp_queries_match_postgresql_and_gateway() { - let test_timestamps = vec![1230995705, 1230992105, 1230999305, 1230995705]; - let expected_timestamps = vec![1230995705, 1230995705]; + let test_timestamps = vec![1_230_995_705, 1_230_992_105, 1_230_999_305, 1_230_995_705]; + let expected_timestamps = vec![1_230_995_705, 1_230_995_705]; run_timestamp_query_test( "SELECT * FROM table WHERE times = timestamp '2009-01-03T19:15:05+04:00'", @@ -250,8 +250,8 @@ mod tests { // Unix time for 1998-12-31T23:59:59 UTC is 915148799 // Assuming leap second at 1998-12-31T23:59:60 UTC is recognized, it would be 915148799 // Unix time for 1999-01-01T00:00:00 UTC is 915148800 - let test_timestamps = vec![915148799, 915148800, 915148801]; - let expected_timestamps = [915148799, 915148800, 915148801]; // Expect the leap second to be parsed and matched + let test_timestamps = vec![915_148_799, 915_148_800, 915_148_801]; + let expected_timestamps = [915_148_799, 915_148_800, 915_148_801]; // Expect the leap second to be parsed and matched // Test the query to select the leap second run_timestamp_query_test( @@ -351,8 +351,8 @@ mod tests { #[test] fn test_unix_epoch_daylight_saving() { // Timestamps just before and after DST change in spring - let test_timestamps = vec![1583651999, 1583652000]; // Spring forward at 2 AM - let expected_timestamps = vec![1583651999]; // Only the time before the DST jump should match + let test_timestamps = vec![1_583_651_999, 1_583_652_000]; // Spring forward at 2 AM + let expected_timestamps = vec![1_583_651_999]; // Only the time before the DST jump should match run_timestamp_query_test( "SELECT * FROM table WHERE times = to_timestamp(1583651999)", @@ -363,8 +363,8 @@ mod tests { #[test] fn test_unix_epoch_leap_year() { - let test_timestamps = vec![1582934400]; // 2020-02-29T00:00:00Z - let expected_timestamps = vec![1582934400]; + let test_timestamps = vec![1_582_934_400]; // 2020-02-29T00:00:00Z + let expected_timestamps = 
vec![1_582_934_400]; run_timestamp_query_test( "SELECT * FROM table WHERE times = to_timestamp(1582934400);", @@ -376,10 +376,10 @@ mod tests { #[test] fn test_unix_epoch_time_zone_handling() { let test_timestamps = vec![ - 1603587600, // 2020-10-25T01:00:00Z in UTC, corresponds to 2 AM in UTC+1 before DST ends - 1603591200, // Corresponds to 2 AM in UTC+1 after DST ends (1 hour later) + 1_603_587_600, // 2020-10-25T01:00:00Z in UTC, corresponds to 2 AM in UTC+1 before DST ends + 1_603_591_200, // Corresponds to 2 AM in UTC+1 after DST ends (1 hour later) ]; - let expected_timestamps = vec![1603587600]; + let expected_timestamps = vec![1_603_587_600]; run_timestamp_query_test( "SELECT * FROM table WHERE times = to_timestamp(1603587600)", @@ -413,7 +413,7 @@ fn we_can_prove_timestamp_inequality_queries_with_multiple_columns() { 0, 1, -2, - 1231006505000000000, // bitcoin genesis block time + 1_231_006_505_000_000_000, // bitcoin genesis block time i64::MAX, ], ), @@ -421,7 +421,16 @@ fn we_can_prove_timestamp_inequality_queries_with_multiple_columns() { "b", PoSQLTimeUnit::Nanosecond, PoSQLTimeZone::Utc, - [i64::MAX, -2, -1, -1231006505000000000, 0, 1, 2, i64::MIN], + [ + i64::MAX, + -2, + -1, + -1_231_006_505_000_000_000, + 0, + 1, + 2, + i64::MIN, + ], ), ]), 0,