diff --git a/Cargo.toml b/Cargo.toml
index a25d977d8..7cf60d375 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -62,3 +62,6 @@ zerocopy = { version = "0.7.34" }
 
 [workspace.lints.rust]
 missing_docs = "warn"
+
+[workspace.lints.clippy]
+missing_panics_doc = "warn"
diff --git a/clippy.toml b/clippy.toml
index 3f5e90af0..1d490570f 100644
--- a/clippy.toml
+++ b/clippy.toml
@@ -1,2 +1,3 @@
 missing-docs-in-crate-items = true
+check-private-items = true
 doc-valid-idents = ["DeFi"]
diff --git a/crates/proof-of-sql-parser/src/identifier.rs b/crates/proof-of-sql-parser/src/identifier.rs
index 3248bcd14..584c54cbf 100644
--- a/crates/proof-of-sql-parser/src/identifier.rs
+++ b/crates/proof-of-sql-parser/src/identifier.rs
@@ -16,6 +16,11 @@ impl Identifier {
     /// This is necessary to guarantee that no one outside the crate
     /// can create Names, thus securing that [`ResourceId`]s and [`Identifier`]s
     /// are always valid postgresql identifiers.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if:
+    /// - The provided string is too long to fit into the internal `ArrayString`.
     pub(crate) fn new<S: AsRef<str>>(string: S) -> Self {
         Self {
             name: ArrayString::from(&string.as_ref().to_lowercase()).expect("Identifier too long"),
diff --git a/crates/proof-of-sql-parser/src/intermediate_ast.rs b/crates/proof-of-sql-parser/src/intermediate_ast.rs
index 70bec1122..5856679e1 100644
--- a/crates/proof-of-sql-parser/src/intermediate_ast.rs
+++ b/crates/proof-of-sql-parser/src/intermediate_ast.rs
@@ -232,6 +232,9 @@ impl Expression {
         })
     }
     /// Create an `AliasedResultExpr` from an `Expression` using the provided alias.
+    /// # Panics
+    ///
+    /// This function will panic if the provided `alias` cannot be parsed into an `Identifier`.
     #[must_use]
     pub fn alias(self, alias: &str) -> AliasedResultExpr {
         AliasedResultExpr {
diff --git a/crates/proof-of-sql-parser/src/lib.rs b/crates/proof-of-sql-parser/src/lib.rs
index b9356607d..b92937815 100644
--- a/crates/proof-of-sql-parser/src/lib.rs
+++ b/crates/proof-of-sql-parser/src/lib.rs
@@ -1,6 +1,6 @@
 #![doc = include_str!("../README.md")]
 #![no_std]
-#![allow(clippy::missing_panics_doc)] // Fixed in Issue #163
+#![cfg_attr(test, allow(clippy::missing_panics_doc))]
 extern crate alloc;
 
 /// Module for handling an intermediate decimal type received from the lexer.
@@ -35,7 +35,7 @@ pub mod resource_id;
 pub use resource_id::ResourceId;
 
 // lalrpop-generated code is not clippy-compliant
-lalrpop_mod!(#[allow(clippy::all, missing_docs, clippy::missing_docs_in_private_items, clippy::pedantic)] pub sql);
+lalrpop_mod!(#[allow(clippy::all, missing_docs, clippy::missing_docs_in_private_items, clippy::pedantic, clippy::missing_panics_doc)] pub sql);
 
 /// Implement [`Deserialize`](serde::Deserialize) through [`FromStr`](core::str::FromStr) to avoid invalid identifiers.
 #[macro_export]
diff --git a/crates/proof-of-sql-parser/src/posql_time/unit.rs b/crates/proof-of-sql-parser/src/posql_time/unit.rs
index 480080b76..26503e369 100644
--- a/crates/proof-of-sql-parser/src/posql_time/unit.rs
+++ b/crates/proof-of-sql-parser/src/posql_time/unit.rs
@@ -45,7 +45,7 @@ impl fmt::Display for PoSQLTimeUnit {
 // allow(deprecated) for the sole purpose of testing that
 // timestamp precision is parsed correctly.
 #[cfg(test)]
-#[allow(deprecated)]
+#[allow(deprecated, clippy::missing_panics_doc)]
 mod time_unit_tests {
     use super::*;
     use crate::posql_time::{PoSQLTimestamp, PoSQLTimestampError};
diff --git a/crates/proof-of-sql-parser/src/select_statement.rs b/crates/proof-of-sql-parser/src/select_statement.rs
index cd97c4ce8..1d6bbc55f 100644
--- a/crates/proof-of-sql-parser/src/select_statement.rs
+++ b/crates/proof-of-sql-parser/src/select_statement.rs
@@ -69,6 +69,11 @@ impl FromStr for SelectStatement {
     }
 }
 
+/// # Panics
+///
+/// This function will panic if:
+/// - `ResourceId::try_new` fails to create a valid `ResourceId`
+///   from a table expression, since the result is unwrapped.
 fn convert_table_expr_to_resource_id_vector(
     table_expressions: &[Box<TableExpression>],
     default_schema: Identifier,
diff --git a/crates/proof-of-sql-parser/src/utility.rs b/crates/proof-of-sql-parser/src/utility.rs
index d6b0d8957..cf9d7781e 100644
--- a/crates/proof-of-sql-parser/src/utility.rs
+++ b/crates/proof-of-sql-parser/src/utility.rs
@@ -7,6 +7,10 @@ use crate::{
 };
 use alloc::{boxed::Box, vec, vec::Vec};
 
 /// Construct an identifier from a str
+///
+/// # Panics
+///
+/// This function will panic if `name` cannot be parsed into a valid [`Identifier`].
 #[must_use]
 pub fn ident(name: &str) -> Identifier {
@@ -115,6 +119,9 @@ pub fn div(left: Box<Expression>, right: Box<Expression>) -> Box<Expression> {
 /// Get table from schema and name.
 ///
 /// If the schema is `None`, the table is assumed to be in the default schema.
+/// # Panics
+///
+/// This function will panic if either the `name` or the `schema` (if provided) cannot be parsed as a valid [`Identifier`].
 #[must_use]
 pub fn tab(schema: Option<&str>, name: &str) -> Box<TableExpression> {
     Box::new(TableExpression::Named {
@@ -124,6 +131,10 @@ pub fn tab(schema: Option<&str>, name: &str) -> Box<TableExpression> {
 }
 
 /// Get column from name
+///
+/// # Panics
+///
+/// This function will panic if `name` cannot be parsed into a valid [`Identifier`].
 #[must_use]
 pub fn col(name: &str) -> Box<Expression> {
     Box::new(Expression::Column(name.parse().unwrap()))
@@ -177,6 +188,10 @@ pub fn count_all() -> Box<Expression> {
 }
 
 /// An expression with an alias i.e. EXPR AS ALIAS
+///
+/// # Panics
+///
+/// This function will panic if the `alias` cannot be parsed as a valid [`Identifier`].
 #[must_use]
 pub fn aliased_expr(expr: Box<Expression>, alias: &str) -> AliasedResultExpr {
     AliasedResultExpr {
@@ -192,6 +207,10 @@ pub fn col_res_all() -> SelectResultExpr {
 }
 
 /// Select one column from a table and give it an alias i.e. SELECT COL AS ALIAS
+///
+/// # Panics
+///
+/// This function will panic if the `alias` cannot be parsed as a valid [`Identifier`].
 #[must_use]
 pub fn col_res(col_val: Box<Expression>, alias: &str) -> SelectResultExpr {
     SelectResultExpr::AliasedResultExpr(AliasedResultExpr {
@@ -207,6 +226,10 @@ pub fn cols_res(names: &[&str]) -> Vec<SelectResultExpr> {
 }
 
 /// Compute the minimum of an expression and give it an alias i.e. SELECT MIN(EXPR) AS ALIAS
+///
+/// # Panics
+///
+/// This function will panic if the `alias` cannot be parsed.
 #[must_use]
 pub fn min_res(expr: Box<Expression>, alias: &str) -> SelectResultExpr {
     SelectResultExpr::AliasedResultExpr(AliasedResultExpr {
@@ -216,6 +239,10 @@ pub fn min_res(expr: Box<Expression>, alias: &str) -> SelectResultExpr {
 }
 
 /// Compute the maximum of an expression and give it an alias i.e. SELECT MAX(EXPR) AS ALIAS
+///
+/// # Panics
+///
+/// This function will panic if the `alias` cannot be parsed.
 #[must_use]
 pub fn max_res(expr: Box<Expression>, alias: &str) -> SelectResultExpr {
     SelectResultExpr::AliasedResultExpr(AliasedResultExpr {
@@ -225,6 +252,10 @@ pub fn max_res(expr: Box<Expression>, alias: &str) -> SelectResultExpr {
 }
 
 /// Compute the sum of an expression and give it an alias i.e. SELECT SUM(EXPR) AS ALIAS
+///
+/// # Panics
+///
+/// This function will panic if the `alias` cannot be parsed.
 #[must_use]
 pub fn sum_res(expr: Box<Expression>, alias: &str) -> SelectResultExpr {
     SelectResultExpr::AliasedResultExpr(AliasedResultExpr {
@@ -234,6 +265,10 @@ pub fn sum_res(expr: Box<Expression>, alias: &str) -> SelectResultExpr {
 }
 
 /// Count the amount of non-null entries of expression and give it an alias i.e. SELECT COUNT(EXPR) AS ALIAS
+///
+/// # Panics
+///
+/// This function will panic if the `alias` cannot be parsed.
 #[must_use]
 pub fn count_res(expr: Box<Expression>, alias: &str) -> SelectResultExpr {
     SelectResultExpr::AliasedResultExpr(AliasedResultExpr {
@@ -243,6 +278,10 @@ pub fn count_res(expr: Box<Expression>, alias: &str) -> SelectResultExpr {
 }
 
 /// Count rows and give the result an alias i.e. SELECT COUNT(*) AS ALIAS
+///
+/// # Panics
+///
+/// This function will panic if the `alias` cannot be parsed.
 #[must_use]
 pub fn count_all_res(alias: &str) -> SelectResultExpr {
     SelectResultExpr::AliasedResultExpr(AliasedResultExpr {
@@ -305,6 +344,10 @@ pub fn select(
 }
 
 /// Order by one column i.e. ORDER BY ID [ASC|DESC]
+///
+/// # Panics
+///
+/// This function will panic if the `id` cannot be parsed into an identifier.
 #[must_use]
 pub fn order(id: &str, direction: OrderByDirection) -> Vec<OrderBy> {
     vec![OrderBy {
@@ -314,6 +357,11 @@ pub fn order(id: &str, direction: OrderByDirection) -> Vec<OrderBy> {
 }
 
 /// Order by multiple columns i.e. ORDER BY ID0 [ASC|DESC], ID1 [ASC|DESC], ...
+///
+/// # Panics
+///
+/// This function will panic if any of the `ids` cannot be parsed
+/// into an identifier.
 #[must_use]
 pub fn orders(ids: &[&str], directions: &[OrderByDirection]) -> Vec<OrderBy> {
     ids.iter()
@@ -335,6 +383,11 @@ pub fn slice(number_rows: u64, offset_value: i64) -> Option<Slice> {
 }
 
 /// Group by clause with multiple columns i.e. GROUP BY ID0, ID1, ...
+///
+/// # Panics
+///
+/// This function will panic if any of the `ids` cannot be parsed
+/// into an identifier.
 #[must_use]
 pub fn group_by(ids: &[&str]) -> Vec<Identifier> {
     ids.iter().map(|id| id.parse().unwrap()).collect()
diff --git a/crates/proof-of-sql/benches/bench_append_rows.rs b/crates/proof-of-sql/benches/bench_append_rows.rs
index be12644a7..72eef7913 100644
--- a/crates/proof-of-sql/benches/bench_append_rows.rs
+++ b/crates/proof-of-sql/benches/bench_append_rows.rs
@@ -38,6 +38,15 @@ use std::ops::Deref;
 /// append 10 rows to 10 cols in 100 tables = 1.1382 seconds
 /// append 1000 rows to 10 cols in 1 table = 652ms
 /// ```
+///
+/// # Panics
+///
+/// Will panic if the creation of the table commitment fails due to invalid column data or an incorrect prover setup.
+///
+/// Will panic if the row appending operation fails due to invalid data or if the local commitment has reached an invalid state.
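+///
+/// A call-shape sketch (illustrative, mirroring the timings quoted above):
+/// `bench_append_rows(c, 10, 1000)` appends 1000 rows to 10 columns per iteration.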
 fn bench_append_rows(c: &mut Criterion, cols: usize, rows: usize) {
     let public_parameters = PublicParameters::test_rand(10, &mut test_rng());
     let prover_setup = ProverSetup::from(&public_parameters);
diff --git a/crates/proof-of-sql/benches/jaeger_benches.rs b/crates/proof-of-sql/benches/jaeger_benches.rs
index c66c72c7c..00090098c 100644
--- a/crates/proof-of-sql/benches/jaeger_benches.rs
+++ b/crates/proof-of-sql/benches/jaeger_benches.rs
@@ -15,13 +15,11 @@ use proof_of_sql::proof_primitive::dory::{
     DoryEvaluationProof, DoryProverPublicSetup, DoryVerifierPublicSetup, ProverSetup,
     PublicParameters, VerifierSetup,
 };
-/// TODO: add docs
 mod scaffold;
 use crate::scaffold::querys::QUERIES;
 use scaffold::jaeger_scaffold;
 use std::env;
 
-/// TODO: add docs
 const SIZE: usize = 1_000_000;
 
 fn main() {
diff --git a/crates/proof-of-sql/benches/scaffold/benchmark_accessor.rs b/crates/proof-of-sql/benches/scaffold/benchmark_accessor.rs
index 1e89b770a..1046fc0e3 100644
--- a/crates/proof-of-sql/benches/scaffold/benchmark_accessor.rs
+++ b/crates/proof-of-sql/benches/scaffold/benchmark_accessor.rs
@@ -18,6 +18,9 @@ pub struct BenchmarkAccessor<'a, C: Commitment> {
 }
 
 impl<'a, C: Commitment> BenchmarkAccessor<'a, C> {
+    /// # Panics
+    ///
+    /// Will panic if the length of the columns does not match after insertion or if the commitment computation fails.
     pub fn insert_table(
         &mut self,
         table_ref: TableRef,
@@ -63,11 +66,17 @@ impl<'a, C: Commitment> BenchmarkAccessor<'a, C> {
 }
 
 impl<C: Commitment> DataAccessor<C::Scalar> for BenchmarkAccessor<'_, C> {
+    /// # Panics
+    ///
+    /// Will panic if the column reference does not exist in the accessor.
     fn get_column(&self, column: ColumnRef) -> Column<C::Scalar> {
         *self.columns.get(&column).unwrap()
     }
 }
 impl<C: Commitment> MetadataAccessor for BenchmarkAccessor<'_, C> {
+    /// # Panics
+    ///
+    /// Will panic if the table reference does not exist in the lengths map.
     fn get_length(&self, table_ref: TableRef) -> usize {
         *self.lengths.get(&table_ref).unwrap()
     }
@@ -76,6 +85,9 @@ impl<C: Commitment> MetadataAccessor for BenchmarkAccessor<'_, C> {
     }
 }
 impl<C: Commitment> CommitmentAccessor<C> for BenchmarkAccessor<'_, C> {
+    /// # Panics
+    ///
+    /// Will panic if the column reference does not exist in the commitments map.
     fn get_commitment(&self, column: ColumnRef) -> C {
         self.commitments.get(&column).unwrap().clone()
     }
@@ -84,6 +96,9 @@ impl<C: Commitment> SchemaAccessor for BenchmarkAccessor<'_, C> {
     fn lookup_column(&self, table_ref: TableRef, column_id: Identifier) -> Option<ColumnType> {
         self.column_types.get(&(table_ref, column_id)).copied()
     }
+    /// # Panics
+    ///
+    /// Will panic if the table reference does not exist in the table schemas map.
     fn lookup_schema(&self, table_ref: TableRef) -> Vec<(Identifier, ColumnType)> {
         self.table_schemas.get(&table_ref).unwrap().clone()
     }
diff --git a/crates/proof-of-sql/benches/scaffold/mod.rs b/crates/proof-of-sql/benches/scaffold/mod.rs
index 1c7c0fb9c..4a3e665b5 100644
--- a/crates/proof-of-sql/benches/scaffold/mod.rs
+++ b/crates/proof-of-sql/benches/scaffold/mod.rs
@@ -12,6 +12,13 @@ pub mod querys;
 mod random_util;
 use random_util::{generate_random_columns, OptionalRandBound};
 
+/// # Panics
+///
+/// Will panic if:
+/// - The table reference cannot be parsed from the string.
+/// - The columns generated from `generate_random_columns` lead to a failure in `insert_table`.
+/// - The query string cannot be parsed into a `QueryExpr`.
+/// - The creation of the `VerifiableQueryResult` fails due to invalid proof expressions.
 fn scaffold<'a, CP: CommitmentEvaluationProof>(
     query: &str,
     columns: &[(&str, ColumnType, OptionalRandBound)],
@@ -36,6 +43,11 @@ fn scaffold<'a, CP: CommitmentEvaluationProof>(
     level = "debug",
     skip(query, columns, size, prover_setup, verifier_setup)
 )]
+/// # Panics
+///
+/// Will panic if:
+/// - The call to `scaffold` results in a panic due to invalid inputs.
+/// - The `verify` method of `VerifiableQueryResult` fails, indicating an invalid proof.
 pub fn jaeger_scaffold<C: Commitment>(
     title: &str,
     query: &str,
diff --git a/crates/proof-of-sql/benches/scaffold/random_util.rs b/crates/proof-of-sql/benches/scaffold/random_util.rs
index b58eebb02..dc3b988c2 100644
--- a/crates/proof-of-sql/benches/scaffold/random_util.rs
+++ b/crates/proof-of-sql/benches/scaffold/random_util.rs
@@ -7,6 +7,11 @@ use proof_of_sql_parser::Identifier;
 use rand::Rng;
 
 pub type OptionalRandBound = Option<fn(usize) -> i64>;
+/// # Panics
+///
+/// Will panic if:
+/// - The provided identifier cannot be parsed into an `Identifier` type.
+/// - An unsupported `ColumnType` is encountered, triggering a panic in the `todo!()` macro.
 pub fn generate_random_columns<'a, S: Scalar>(
     alloc: &'a Bump,
     rng: &mut impl Rng,
diff --git a/crates/proof-of-sql/examples/hello_world/main.rs b/crates/proof-of-sql/examples/hello_world/main.rs
index 0e2b84985..00fd22876 100644
--- a/crates/proof-of-sql/examples/hello_world/main.rs
+++ b/crates/proof-of-sql/examples/hello_world/main.rs
@@ -10,17 +10,30 @@ use std::{
     time::Instant,
 };
 
-/// TODO: add docs
+/// # Panics
+///
+/// Will panic if flushing the output fails, which can happen due to issues with the underlying output stream.
 fn start_timer(message: &str) -> Instant {
     print!("{}...", message);
     stdout().flush().unwrap();
     Instant::now()
 }
-/// TODO: add docs
+/// # Panics
+///
+/// This function does not panic under normal circumstances but may panic if the internal printing fails due to issues with the output stream.
 fn end_timer(instant: Instant) {
     println!(" {:?}", instant.elapsed());
 }
 
+/// # Panics
+///
+/// - Will panic if the GPU initialization fails during `init_backend`.
+/// - Will panic if the table reference cannot be parsed in `add_table`.
+/// - Will panic if the offset provided to `add_table` is invalid.
+/// - Will panic if the query string cannot be parsed in `QueryExpr::try_new`.
+/// - Will panic if the table reference cannot be parsed in `QueryExpr::try_new`.
+/// - Will panic if the query expression creation fails.
+/// - Will panic if printing fails during error handling.
 fn main() {
     let timer = start_timer("Warming up GPU");
     init_backend();
diff --git a/crates/proof-of-sql/examples/posql_db/main.rs b/crates/proof-of-sql/examples/posql_db/main.rs
index 8901318a3..668e66f25 100644
--- a/crates/proof-of-sql/examples/posql_db/main.rs
+++ b/crates/proof-of-sql/examples/posql_db/main.rs
@@ -115,6 +115,10 @@ enum Commands {
     },
 }
 
+/// # Panics
+///
+/// Will panic if the call to `stdout().flush()` fails, indicating that the
+/// standard output stream could not be flushed.
 fn start_timer(message: &str) -> Instant {
     print!("{}...", message);
     stdout().flush().unwrap();
@@ -125,6 +129,21 @@ fn end_timer(instant: Instant) {
     println!(" {:?}", instant.elapsed());
 }
 
+/// # Panics
+///
+/// This function can panic under the following circumstances:
+///
+/// - **GPU Initialization Failure**: The program will panic if the GPU backend initialization fails.
+/// - **Commit Load Failure**: Panics if the commit cannot be loaded from the specified path.
+/// - **Table Commitment Creation Failure**: Panics if the table commitment creation fails.
+/// - **Commit Write Failure**: Panics if writing the commit to storage fails.
+/// - **CSV Write Failure**: Panics if writing the table or batch data to the CSV accessor fails.
+/// - **CSV Read Failure**: Panics if reading a CSV file into a record batch fails.
+/// - **Query Parsing Failure**: Panics if parsing the query expression fails.
+/// - **Proof Generation Failure**: Panics if generating the cryptographic proof fails.
+/// - **Proof Verification Failure**: Panics if the proof verification process fails.
+/// - **Serialization/Deserialization Failure**: Panics if the proof cannot be serialized or deserialized.
+/// - **Record Batch Conversion Failure**: Panics if the query result cannot be converted into a `RecordBatch`.
 fn main() {
     let args = CliArgs::parse();
     println!("Warming up GPU...");
diff --git a/crates/proof-of-sql/src/base/bit/bit_distribution.rs b/crates/proof-of-sql/src/base/bit/bit_distribution.rs
index f8a4667cd..8f803a9ef 100644
--- a/crates/proof-of-sql/src/base/bit/bit_distribution.rs
+++ b/crates/proof-of-sql/src/base/bit/bit_distribution.rs
@@ -49,6 +49,7 @@ impl BitDistribution {
         self.vary_mask[3] & (1 << 63) != 0
     }
 
+    #[allow(clippy::missing_panics_doc)]
     pub fn sign_bit(&self) -> bool {
         assert!(!self.has_varying_sign_bit());
         self.or_all[3] & (1 << 63) != 0
@@ -126,8 +127,8 @@ impl BitDistribution {
     }
 
     /// Return the position of the most significant bit of the absolute values
-    ///
-    /// Panic if no bits are set to 1
+    /// # Panics
+    /// Panics if no bits are set to 1 in the bit representation of `or_all`.
     pub fn most_significant_abs_bit(&self) -> usize {
         let mask = self.or_all[3] & !(1 << 63);
         if mask != 0 {
diff --git a/crates/proof-of-sql/src/base/commitment/column_commitment_metadata.rs b/crates/proof-of-sql/src/base/commitment/column_commitment_metadata.rs
index d63188c58..5affbc78a 100644
--- a/crates/proof-of-sql/src/base/commitment/column_commitment_metadata.rs
+++ b/crates/proof-of-sql/src/base/commitment/column_commitment_metadata.rs
@@ -66,6 +66,7 @@ impl ColumnCommitmentMetadata {
         }
     }
 
+    #[allow(clippy::missing_panics_doc)]
     /// Construct a [`ColumnCommitmentMetadata`] with widest possible bounds for the column type.
     #[must_use]
     pub fn from_column_type_with_max_bounds(column_type: ColumnType) -> Self {
@@ -124,6 +125,7 @@ impl ColumnCommitmentMetadata {
     /// Combine two [`ColumnCommitmentMetadata`] as if their source collections are being unioned.
     ///
     /// Can error if the two metadatas are mismatched.
+    #[allow(clippy::missing_panics_doc)]
     pub fn try_union(
         self,
         other: ColumnCommitmentMetadata,
@@ -150,6 +152,1 @@ wait
+    #[allow(clippy::missing_panics_doc)]
     pub fn try_append_rows_with_offset<'a, COL>(
         &mut self,
         columns: impl IntoIterator<Item = (&'a Identifier, COL)>,
@@ -259,6 +260,7 @@ impl<C: Commitment> ColumnCommitments<C> {
     ///
     /// Will error on a variety of mismatches.
     /// See [`ColumnCommitmentsMismatch`] for an enumeration of these errors.
+    #[allow(clippy::missing_panics_doc)]
     pub fn try_add(self, other: Self) -> Result<Self, ColumnCommitmentsMismatch>
     where
         Self: Sized,
@@ -279,6 +281,7 @@ impl<C: Commitment> ColumnCommitments<C> {
     ///
     /// Will error on a variety of mismatches.
     /// See [`ColumnCommitmentsMismatch`] for an enumeration of these errors.
+    #[allow(clippy::missing_panics_doc)]
    pub fn try_sub(self, other: Self) -> Result<Self, ColumnCommitmentsMismatch>
    where
        Self: Sized,
diff --git a/crates/proof-of-sql/src/base/commitment/query_commitments.rs b/crates/proof-of-sql/src/base/commitment/query_commitments.rs
index 89f609e6b..ce4ffd1c3 100644
--- a/crates/proof-of-sql/src/base/commitment/query_commitments.rs
+++ b/crates/proof-of-sql/src/base/commitment/query_commitments.rs
@@ -80,6 +80,9 @@ impl<C: Commitment> MetadataAccessor for QueryCommitments<C> {
     }
 }
 
+/// # Panics
+///
+/// Panics if the commitment for the table or column cannot be found.
 impl<C: Commitment> CommitmentAccessor<C> for QueryCommitments<C> {
     fn get_commitment(&self, column: ColumnRef) -> C {
         let table_commitment = self.get(&column.table_ref()).unwrap();
@@ -105,6 +108,9 @@ impl<C: Commitment> SchemaAccessor for QueryCommitments<C> {
             .map(|column_metadata| *column_metadata.column_type())
     }
 
+    /// # Panics
+    ///
+    /// Panics if the column metadata cannot be found.
     fn lookup_schema(
         &self,
         table_ref: crate::base::database::TableRef,
diff --git a/crates/proof-of-sql/src/base/commitment/table_commitment.rs b/crates/proof-of-sql/src/base/commitment/table_commitment.rs
index 74fe04fe0..61f463323 100644
--- a/crates/proof-of-sql/src/base/commitment/table_commitment.rs
+++ b/crates/proof-of-sql/src/base/commitment/table_commitment.rs
@@ -133,6 +133,10 @@ where
 impl<C: Commitment> TableCommitment<C> {
     /// Create a new [`TableCommitment`] for a table from a commitment accessor.
+    #[allow(
+        clippy::missing_panics_doc,
+        reason = "The assertion ensures that from_accessor should not create columns with a negative range"
+    )]
     pub fn from_accessor_with_max_bounds(
         table_ref: TableRef,
         columns: &[ColumnField],
@@ -225,6 +229,10 @@ impl<C: Commitment> TableCommitment<C> {
     }
 
     /// Returns a [`TableCommitment`] to the provided table with the given row offset.
+    #[allow(
+        clippy::missing_panics_doc,
+        reason = "since OwnedTables cannot have columns of mixed length or duplicate identifiers"
+    )]
     pub fn from_owned_table_with_offset(
         owned_table: &OwnedTable<C::Scalar>,
         offset: usize,
@@ -272,6 +280,9 @@ impl<C: Commitment> TableCommitment<C> {
     ///
     /// Will error on a variety of mismatches.
     /// See [`ColumnCommitmentsMismatch`] for an enumeration of these errors.
+    /// # Panics
+    /// Panics if `owned_table` has duplicate identifiers.
+    /// Panics if `owned_table` contains columns of mixed length.
     pub fn append_owned_table(
         &mut self,
         owned_table: &OwnedTable<C::Scalar>,
@@ -394,6 +405,7 @@ impl<C: Commitment> TableCommitment<C> {
     ///
     /// Will error on a variety of mismatches, or if the provided columns have mixed length.
     #[cfg(feature = "arrow")]
+    #[allow(clippy::missing_panics_doc)]
     pub fn try_append_record_batch(
         &mut self,
         batch: &RecordBatch,
@@ -429,6 +441,9 @@ impl<C: Commitment> TableCommitment<C> {
     }
 
     /// Returns a [`TableCommitment`] to the provided arrow [`RecordBatch`] with the given row offset.
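+    /// For example (usage sketch, not exercised by this change): committing a batch at
+    /// offset `0` produces a commitment covering rows `0..batch.num_rows()`.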
+    #[allow(clippy::missing_panics_doc)]
     #[cfg(feature = "arrow")]
     pub fn try_from_record_batch_with_offset(
         batch: &RecordBatch,
diff --git a/crates/proof-of-sql/src/base/database/column.rs b/crates/proof-of-sql/src/base/database/column.rs
index 65b807dd5..b65447e5c 100644
--- a/crates/proof-of-sql/src/base/database/column.rs
+++ b/crates/proof-of-sql/src/base/database/column.rs
@@ -73,6 +73,8 @@ impl<'a, S: Scalar> Column<'a, S> {
         }
     }
     /// Returns the length of the column.
+    /// # Panics
+    /// This function requires that `col` and `scals` have the same length.
     #[must_use]
     pub fn len(&self) -> usize {
         match self {
@@ -200,6 +202,7 @@ impl<'a, S: Scalar> Column<'a, S> {
     }
 
     /// Convert a column to a vector of Scalar values with scaling
+    #[allow(clippy::missing_panics_doc)]
     pub(crate) fn to_scalar_with_scaling(self, scale: i8) -> Vec<S> {
         let scale_factor = scale_scalar(S::ONE, scale).expect("Invalid scale factor");
         match self {
diff --git a/crates/proof-of-sql/src/base/database/column_operation.rs b/crates/proof-of-sql/src/base/database/column_operation.rs
index 7108eb8de..f7a82a0f0 100644
--- a/crates/proof-of-sql/src/base/database/column_operation.rs
+++ b/crates/proof-of-sql/src/base/database/column_operation.rs
@@ -20,6 +20,11 @@ use proof_of_sql_parser::intermediate_ast::BinaryOperator;
 /// Determine the output type of an add or subtract operation if it is possible
 /// to add or subtract the two input types. If the types are not compatible, return
 /// an error.
+///
+/// # Panics
+///
+/// - Panics if `lhs` or `rhs` does not have a precision or scale when they are expected to be numeric types.
+/// - Panics if `lhs` or `rhs` is an integer, and `lhs.max_integer_type(&rhs)` returns `None`.
 pub fn try_add_subtract_column_types(
     lhs: ColumnType,
     rhs: ColumnType,
@@ -70,6 +75,11 @@ pub fn try_add_subtract_column_types(
 /// Determine the output type of a multiplication operation if it is possible
 /// to multiply the two input types. If the types are not compatible, return
 /// an error.
+///
+/// # Panics
+///
+/// - Panics if `lhs` or `rhs` does not have a precision or scale when they are expected to be numeric types.
+/// - Panics if `lhs` or `rhs` is an integer, and `lhs.max_integer_type(&rhs)` returns `None`.
 pub fn try_multiply_column_types(
     lhs: ColumnType,
     rhs: ColumnType,
@@ -116,6 +126,11 @@ pub fn try_multiply_column_types(
 /// Determine the output type of a division operation if it is possible
 /// to multiply the two input types. If the types are not compatible, return
 /// an error.
+///
+/// # Panics
+///
+/// - Panics if `lhs` or `rhs` does not have a precision or scale when they are expected to be numeric types.
+/// - Panics if `lhs` or `rhs` is an integer, and `lhs.max_integer_type(&rhs)` returns `None`.
 pub fn try_divide_column_types(
     lhs: ColumnType,
     rhs: ColumnType,
@@ -497,6 +512,9 @@ where
 /// Check whether a numerical slice is equal to a decimal one.
 ///
 /// Note that we do not check for length equality here.
+/// # Panics
+/// This function requires that `lhs` and `rhs` have the same length.
+/// This function requires that `left_column_type` and `right_column_type` have the same precision and scale.
 pub(super) fn eq_decimal_columns<S, T>(
     lhs: &[T],
     rhs: &[S],
@@ -554,6 +572,9 @@ where
 /// Check whether a numerical slice is less than or equal to a decimal one.
 ///
 /// Note that we do not check for length equality here.
+/// # Panics
+/// This function requires that `lhs` and `rhs` have the same length.
+/// This function requires that `left_column_type` and `right_column_type` have the same precision and scale.
 pub(super) fn le_decimal_columns<S, T>(
     lhs: &[T],
     rhs: &[S],
@@ -624,6 +645,9 @@ where
 /// Check whether a numerical slice is greater than or equal to a decimal one.
 ///
 /// Note that we do not check for length equality here.
+/// # Panics
+/// This function requires that `lhs` and `rhs` have the same length.
+/// This function requires that `left_column_type` and `right_column_type` have the same precision and scale.
 pub(super) fn ge_decimal_columns<S, T>(
     lhs: &[T],
     rhs: &[S],
@@ -694,6 +718,9 @@ where
 /// Add two numerical slices as decimals.
 ///
 /// We do not check for length equality here
+/// # Panics
+/// This function requires that `lhs` and `rhs` have the same length.
+/// This function requires that `left_column_type` and `right_column_type` have the same precision and scale.
 pub(super) fn try_add_decimal_columns<S, T0, T1>(
     lhs: &[T0],
     rhs: &[T1],
@@ -748,6 +775,9 @@ where
 /// Subtract one numerical slice from another as decimals.
 ///
 /// We do not check for length equality here
+/// # Panics
+/// This function requires that `lhs` and `rhs` have the same length.
+/// This function requires that `left_column_type` and `right_column_type` have the same precision and scale.
 pub(super) fn try_subtract_decimal_columns<S, T0, T1>(
     lhs: &[T0],
     rhs: &[T1],
@@ -805,6 +835,9 @@ where
 /// Multiply two numerical slices as decimals.
 ///
 /// We do not check for length equality here
+/// # Panics
+/// This function requires that `lhs` and `rhs` have the same length.
+/// This function requires that `left_column_type` and `right_column_type` have the same precision and scale.
 pub(super) fn try_multiply_decimal_columns<S, T0, T1>(
     lhs: &[T0],
     rhs: &[T1],
@@ -842,6 +875,7 @@ where
 /// 4. Precision and scale follow T-SQL rules. That is,
 ///    - `new_scale = max(6, right_precision + left_scale + 1)`
 ///    - `new_precision = left_precision - left_scale + right_scale + new_scale`
+#[allow(clippy::missing_panics_doc)]
 pub(crate) fn try_divide_decimal_columns<S, T0, T1>(
     lhs: &[T0],
     rhs: &[T1],
diff --git a/crates/proof-of-sql/src/base/database/filter_util.rs b/crates/proof-of-sql/src/base/database/filter_util.rs
index a8afaf261..898ee2b21 100644
--- a/crates/proof-of-sql/src/base/database/filter_util.rs
+++ b/crates/proof-of-sql/src/base/database/filter_util.rs
@@ -8,6 +8,8 @@ use bumpalo::Bump;
 ///
 /// The function returns a tuple of the filtered columns and the number of
 /// rows in the filtered columns.
+/// # Panics
+/// This function requires that `columns` and `selection` have the same length.
 pub fn filter_columns<'a, S: Scalar>(
     alloc: &'a Bump,
     columns: &[Column<'a, S>],
diff --git a/crates/proof-of-sql/src/base/database/owned_and_arrow_conversions.rs b/crates/proof-of-sql/src/base/database/owned_and_arrow_conversions.rs
index 04af8a007..7c7e0ea82 100644
--- a/crates/proof-of-sql/src/base/database/owned_and_arrow_conversions.rs
+++ b/crates/proof-of-sql/src/base/database/owned_and_arrow_conversions.rs
@@ -77,6 +77,11 @@ pub enum OwnedArrowConversionError {
     },
 }
 
+/// # Panics
+///
+/// Will panic if setting precision and scale fails when converting `OwnedColumn::Int128`.
+/// Will panic if setting precision and scale fails when converting `OwnedColumn::Decimal75`.
+/// Will panic if trying to convert `OwnedColumn::Scalar`, as this conversion is not implemented.
 impl<S: Scalar> From<OwnedColumn<S>> for ArrayRef {
     fn from(value: OwnedColumn<S>) -> Self {
         match value {
@@ -134,6 +139,16 @@ impl<S: Scalar> TryFrom<ArrayRef> for OwnedColumn<S> {
     }
 }
 impl<S: Scalar> TryFrom<&ArrayRef> for OwnedColumn<S> {
     type Error = OwnedArrowConversionError;
+    /// # Panics
+    ///
+    /// Will panic if downcasting fails for the following types:
+    /// - `BooleanArray` when converting from `DataType::Boolean`.
+    /// - `Int16Array` when converting from `DataType::Int16`.
+    /// - `Int32Array` when converting from `DataType::Int32`.
+    /// - `Int64Array` when converting from `DataType::Int64`.
+    /// - `Decimal128Array` when converting from `DataType::Decimal128(38, 0)`.
+    /// - `Decimal256Array` when converting from `DataType::Decimal256` if precision is less than or equal to 75.
+    /// - `StringArray` when converting from `DataType::Utf8`.
     fn try_from(value: &ArrayRef) -> Result<Self, Self::Error> {
         match &value.data_type() {
             // Arrow uses a bit-packed representation for booleans.
diff --git a/crates/proof-of-sql/src/base/database/owned_table_test_accessor.rs b/crates/proof-of-sql/src/base/database/owned_table_test_accessor.rs
index 08249a8c9..94752130b 100644
--- a/crates/proof-of-sql/src/base/database/owned_table_test_accessor.rs
+++ b/crates/proof-of-sql/src/base/database/owned_table_test_accessor.rs
@@ -49,7 +49,11 @@ impl<CP: CommitmentEvaluationProof> TestAccessor<CP::Commitment>
     fn add_table(&mut self, table_ref: TableRef, data: Self::Table, table_offset: usize) {
         self.tables.insert(table_ref, (data, table_offset));
     }
-
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the `table_ref` is not found in `self.tables`, indicating
+    /// that an invalid reference was provided.
     fn get_column_names(&self, table_ref: TableRef) -> Vec<&str> {
         self.tables
             .get(&table_ref)
@@ -60,10 +64,21 @@ impl<CP: CommitmentEvaluationProof> TestAccessor<CP::Commitment>
             .collect()
     }
 
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the `table_ref` is not found in `self.tables`, indicating that an invalid reference was provided.
     fn update_offset(&mut self, table_ref: TableRef, new_offset: usize) {
         self.tables.get_mut(&table_ref).unwrap().1 = new_offset;
     }
 }
+
+///
+/// # Panics
+///
+/// Will panic if the `column.table_ref()` is not found in `self.tables`, or if
+/// the `column.column_id()` is not found in the inner table for that reference,
+/// indicating that an invalid column reference was provided.
 impl<CP: CommitmentEvaluationProof> DataAccessor<CP::Scalar> for OwnedTableTestAccessor<'_, CP> {
     fn get_column(&self, column: ColumnRef) -> Column<CP::Scalar> {
         match self
@@ -97,6 +112,11 @@ impl<CP: CommitmentEvaluationProof> DataAccessor<CP::Scalar> for OwnedTableTestA
         }
     }
 }
+
+///
+/// # Panics
+///
+/// Will panic if the `column.table_ref()` is not found in `self.tables`, or if the `column.column_id()` is not found in the inner table for that reference, indicating that an invalid column reference was provided.
 impl<CP: CommitmentEvaluationProof> CommitmentAccessor<CP::Commitment>
     for OwnedTableTestAccessor<'_, CP>
 {
@@ -112,10 +132,17 @@ impl<CP: CommitmentEvaluationProof> CommitmentAccessor<CP::Commitment>
     }
 }
 impl<CP: CommitmentEvaluationProof> MetadataAccessor for OwnedTableTestAccessor<'_, CP> {
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the `table_ref` is not found in `self.tables`, indicating that an invalid reference was provided.
     fn get_length(&self, table_ref: TableRef) -> usize {
         self.tables.get(&table_ref).unwrap().0.num_rows()
     }
-
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the `table_ref` is not found in `self.tables`, indicating that an invalid reference was provided.
     fn get_offset(&self, table_ref: TableRef) -> usize {
         self.tables.get(&table_ref).unwrap().1
     }
@@ -131,7 +158,10 @@ impl<CP: CommitmentEvaluationProof> SchemaAccessor for OwnedTableTestAccessor<'_, CP> {
             .column_type(),
         )
     }
-
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the `table_ref` is not found in `self.tables`, indicating that an invalid reference was provided.
     fn lookup_schema(&self, table_ref: TableRef) -> Vec<(Identifier, ColumnType)> {
         self.tables
             .get(&table_ref)
diff --git a/crates/proof-of-sql/src/base/database/owned_table_utility.rs b/crates/proof-of-sql/src/base/database/owned_table_utility.rs
index 880b99440..a0d3188ae 100644
--- a/crates/proof-of-sql/src/base/database/owned_table_utility.rs
+++ b/crates/proof-of-sql/src/base/database/owned_table_utility.rs
@@ -39,6 +39,9 @@ use proof_of_sql_parser::{
 ///     decimal75("f", 12, 1, [1, 2, 3]),
 /// ]);
 /// ```
+///
+/// # Panics
+/// - Panics if converting the iterator into an `OwnedTable` fails.
 pub fn owned_table<S: Scalar>(
     iter: impl IntoIterator<Item = (Identifier, OwnedColumn<S>)>,
 ) -> OwnedTable<S> {
@@ -48,11 +51,13 @@ pub fn owned_table<S: Scalar>(
 /// Creates a `(Identifier, OwnedColumn)` pair for a smallint column.
 /// This is primarily intended for use in conjunction with [`owned_table`].
 /// # Example
 /// ```
 /// use proof_of_sql::base::{database::owned_table_utility::*, scalar::Curve25519Scalar};
 /// let result = owned_table::<Curve25519Scalar>([
 ///     smallint("a", [1_i16, 2, 3]),
 /// ]);
 /// ```
+/// # Panics
+/// - Panics if `name.parse()` fails to convert the name into an `Identifier`.
 pub fn smallint(
     name: impl Deref<Target = str>,
     data: impl IntoIterator<Item = impl Into<i16>>,
@@ -66,11 +71,13 @@ pub fn smallint(
 /// Creates a `(Identifier, OwnedColumn)` pair for an int column.
 /// This is primarily intended for use in conjunction with [`owned_table`].
 /// # Example
 /// ```
 /// use proof_of_sql::base::{database::owned_table_utility::*, scalar::Curve25519Scalar};
 /// let result = owned_table::<Curve25519Scalar>([
 ///     int("a", [1, 2, 3]),
 /// ]);
 /// ```
+/// # Panics
+/// - Panics if `name.parse()` fails to convert the name into an `Identifier`.
 pub fn int(
     name: impl Deref<Target = str>,
     data: impl IntoIterator<Item = impl Into<i32>>,
@@ -84,11 +91,12 @@ pub fn int(
 /// Creates a `(Identifier, OwnedColumn)` pair for a bigint column.
 /// This is primarily intended for use in conjunction with [`owned_table`].
 /// # Example
 /// ```
 /// use proof_of_sql::base::{database::owned_table_utility::*, scalar::Curve25519Scalar};
 /// let result = owned_table::<Curve25519Scalar>([
 ///     bigint("a", [1, 2, 3]),
 /// ]);
 /// ```
+#[allow(clippy::missing_panics_doc)]
 pub fn bigint(
     name: impl Deref<Target = str>,
     data: impl IntoIterator<Item = impl Into<i64>>,
@@ -108,6 +116,9 @@ pub fn bigint(
 ///     boolean("a", [true, false, true]),
 /// ]);
 /// ```
+///
+/// # Panics
+/// - Panics if `name.parse()` fails to convert the name into an `Identifier`.
 pub fn boolean(
     name: impl Deref<Target = str>,
     data: impl IntoIterator<Item = impl Into<bool>>,
@@ -127,6 +138,9 @@ pub fn boolean(
 ///     int128("a", [1, 2, 3]),
 /// ]);
 /// ```
+///
+/// # Panics
+/// - Panics if `name.parse()` fails to convert the name into an `Identifier`.
 pub fn int128(
     name: impl Deref<Target = str>,
     data: impl IntoIterator<Item = impl Into<i128>>,
@@ -146,6 +160,9 @@ pub fn int128(
 ///     scalar("a", [1, 2, 3]),
 /// ]);
 /// ```
+///
+/// # Panics
+/// - Panics if `name.parse()` fails to convert the name into an `Identifier`.
 pub fn scalar(
     name: impl Deref<Target = str>,
     data: impl IntoIterator<Item = impl Into<S>>,
@@ -165,6 +182,9 @@ pub fn scalar(
 ///     varchar("a", ["a", "b", "c"]),
 /// ]);
 /// ```
+///
+/// # Panics
+/// - Panics if `name.parse()` fails to convert the name into an `Identifier`.
 pub fn varchar(
     name: impl Deref<Target = str>,
     data: impl IntoIterator<Item = impl Into<String>>,
@@ -184,6 +204,10 @@ pub fn varchar(
 ///     decimal75("a", 12, 1, [1, 2, 3]),
 /// ]);
 /// ```
+///
+/// # Panics
+/// - Panics if `name.parse()` fails to convert the name into an `Identifier`.
+/// - Panics if creating the `Precision` from the specified precision value fails.
 pub fn decimal75(
     name: impl Deref<Target = str>,
     precision: u8,
@@ -221,6 +245,9 @@ pub fn decimal75(
 ///     timestamptz("event_time", PoSQLTimeUnit::Second, PoSQLTimeZone::Utc, vec![1625072400, 1625076000, 1625079600]),
 /// ]);
 /// ```
+///
+/// # Panics
+/// - Panics if `name.parse()` fails to convert the name into an `Identifier`.
 pub fn timestamptz(
     name: impl Deref<Target = str>,
     time_unit: PoSQLTimeUnit,
diff --git a/crates/proof-of-sql/src/base/database/record_batch_utility.rs b/crates/proof-of-sql/src/base/database/record_batch_utility.rs
index a8ec5782f..ae4df097d 100644
--- a/crates/proof-of-sql/src/base/database/record_batch_utility.rs
+++ b/crates/proof-of-sql/src/base/database/record_batch_utility.rs
@@ -115,6 +115,10 @@ impl ToArrow for Vec<i128> {
         arrow::datatypes::DataType::Decimal128(38, 0)
     }
 
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the conversion to a `Decimal128Array` fails, which can happen if the data exceeds the specified precision and scale (38, 0). Ensure that all values are within the valid range for the Decimal128 type.
     fn to_array(self) -> Arc<dyn arrow::array::Array> {
         Arc::new(
             arrow::array::Decimal128Array::from(self)
@@ -152,6 +156,12 @@ string_to_arrow_array!(
 
 /// Utility macro to simplify the creation of [`RecordBatch`](arrow::record_batch::RecordBatch) instances
 #[macro_export]
+///
+/// # Panics
+///
+/// Will panic if the `RecordBatch` creation fails. This can occur if:
+/// - The lengths of the provided slices are not equal.
+/// - The `to_array()` method on any slice returns an error, indicating invalid data types or mismatched lengths.
 macro_rules! record_batch {
     ($($col_name:expr => $slice:expr), + $(,)?) => {
         {
@@ -167,7 +177,6 @@ macro_rules! record_batch {
             ,)+]));
 
             let arrays = vec![$($slice.to_vec().to_array(),)+];
-
             RecordBatch::try_new(schema, arrays).unwrap()
         }
     }
diff --git a/crates/proof-of-sql/src/base/database/test_accessor_utility.rs b/crates/proof-of-sql/src/base/database/test_accessor_utility.rs
index 6e1ddceac..bac58bd9c 100644
--- a/crates/proof-of-sql/src/base/database/test_accessor_utility.rs
+++ b/crates/proof-of-sql/src/base/database/test_accessor_utility.rs
@@ -39,6 +39,15 @@ impl Default for RandomTestAccessorDescriptor {
     }
 }
 
 /// Generate a DataFrame with random data
+///
+/// # Panics
+///
+/// This function may panic in the following cases:
+/// - If `Precision::new(7)` fails when creating a `Decimal75` column type, which would occur
+///   if the precision is invalid.
+/// - When calling `.unwrap()` on the result of `RecordBatch::try_new(schema, columns)`, which
+///   will panic if the schema and columns do not align correctly or if there are any other
+///   underlying errors.
 #[allow(dead_code)]
 pub fn make_random_test_accessor_data(
     rng: &mut StdRng,
diff --git a/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs b/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs
index 3a6a9cd33..9f39a132d 100644
--- a/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs
+++ b/crates/proof-of-sql/src/base/polynomial/composite_polynomial.rs
@@ -70,6 +70,7 @@ impl<S: Scalar> CompositePolynomial<S> {
 
     /// Add a list of multilinear extensions that is meant to be multiplied together.
     /// The resulting polynomial will be multiplied by the scalar `coefficient`.
+    #[allow(clippy::missing_panics_doc)]
     pub fn add_product(&mut self, product: impl IntoIterator<Item = Rc<Vec<S>>>, coefficient: S) {
         let product: Vec<Rc<Vec<S>>> = product.into_iter().collect();
         let mut indexed_product = Vec::with_capacity(product.len());
diff --git a/crates/proof-of-sql/src/base/polynomial/interpolate.rs b/crates/proof-of-sql/src/base/polynomial/interpolate.rs
index e9d67a8d6..ed821e3c6 100644
--- a/crates/proof-of-sql/src/base/polynomial/interpolate.rs
+++ b/crates/proof-of-sql/src/base/polynomial/interpolate.rs
@@ -10,11 +10,11 @@ use num_traits::{Inv, One, Zero};
 
 /// Interpolate a uni-variate degree-`polynomial.len()-1` polynomial and evaluate this
 /// polynomial at `x`:
-///
 /// For any polynomial, `f(x)`, with degree less than or equal to `d`, we have that:
 /// `f(x) = sum_{i=0}^{d} (-1)^(d-i) * (f(i) / (i! * (d-i)! * (x-i))) * prod_{i=0}^{d} (x-i)`
 /// unless x is one of 0,1,...,d, in which case, f(x) is already known.
-#[allow(dead_code)]
+// Allow missing panics documentation because the function should not panic under normal conditions.
+#[allow(dead_code, clippy::missing_panics_doc)]
 pub fn interpolate_uni_poly<F>(polynomial: &[F], x: F) -> F
 where
     F: Copy
@@ -56,7 +57,9 @@ where
         let new_term = polynomial[i]
             * (factorials[i] * factorials[degree - i] * x_minus_i)
                 .inv()
-                .unwrap(); // This unwrap is safe because we are guarenteed that x-i is not zero, and factorials are never zero.
+                .expect(
+                    "Inverse computation failed unexpectedly. This should not happen as `x != i`.",
+                );
 
         // This handles the (-1)^(d-i) sign.
         if (degree - i) % 2 == 0 {
@@ -73,6 +76,10 @@ where
 /// Let `d` be `evals.len() - 1` and let `f` be the polynomial such that `f(i) = evals[i]`.
 /// The output of this function is the vector of coefficients of `f`, with the leading coefficient first.
 /// That is, `f(x) = evals[j] * x^(d - j)`.
+#[allow(clippy::missing_panics_doc)]
+// This function is guaranteed not to panic because:
+// - The product in `inv()` will never be zero, as the numbers being multiplied are all non-zero by construction.
+// - If there are no elements to reduce, `unwrap_or(vec![])` provides an empty vector as a safe fallback.
 pub fn interpolate_evaluations_to_reverse_coefficients<S>(evals: &[S]) -> Vec<S>
 where
     S: Zero
@@ -96,7 +103,7 @@ where
                 .map(S::from)
                 .product::<S>()
                 .inv()
-                .unwrap()
+                .expect("Product will never be zero because the terms being multiplied are non-zero by construction.")
                 * eval_i;
             // Then multiply by the appropriate linear terms:
             // for j in 0..=n if j != i {
diff --git a/crates/proof-of-sql/src/base/polynomial/lagrange_basis_evaluation.rs b/crates/proof-of-sql/src/base/polynomial/lagrange_basis_evaluation.rs
index 45f9dd121..be65d36a7 100644
--- a/crates/proof-of-sql/src/base/polynomial/lagrange_basis_evaluation.rs
+++ b/crates/proof-of-sql/src/base/polynomial/lagrange_basis_evaluation.rs
@@ -20,6 +20,9 @@ where
 // The returned value from this function is (part, full).
 // The full value is what the result would be if it were not truncated. (In other words, if length==2^nu.)
 // This can be iteratively used to compute the actual result.
+/// # Panics
+/// This function requires that `a` and `b` have the same length.
+/// This function requires that `length` is less than or equal to `1 << nu`, where `nu` is the length of `a` and `b`.
 fn compute_truncated_lagrange_basis_inner_product_impl<F>(
     part_length: usize,
     a: &[F],
@@ -78,6 +81,11 @@ where
 /// (1-a[0])(a[1])...(1-a[nu-1]) +
 /// (a[0])(a[1])...(1-a[nu-1]) + ...
 /// ```
+/// # Panics
+/// Panics if:
+/// - The length is greater than `1` when `point` is empty.
+/// - The length is greater than the maximum allowed for the given number of points, which is `2^(nu - 1)`
+///   where `nu` is the number of elements in `point`.
 pub fn compute_truncated_lagrange_basis_sum<F>(length: usize, point: &[F]) -> F
 where
     F: One + Zero + Mul<Output = F> + Add<Output = F> + Sub<Output = F> + Copy,
diff --git a/crates/proof-of-sql/src/base/proof/transcript.rs b/crates/proof-of-sql/src/base/proof/transcript.rs
index 4fa6e2977..c50ba74da 100644
--- a/crates/proof-of-sql/src/base/proof/transcript.rs
+++ b/crates/proof-of-sql/src/base/proof/transcript.rs
@@ -28,11 +28,17 @@ pub trait Transcript {
     /// Request a challenge. Returns the raw, unreversed, bytes. (i.e. littleendian form)
     fn challenge_as_le(&mut self) -> [u8; 32];
 
     /// Appends a type that implements [`serde::Serialize`] by appending the raw bytes (i.e. assuming the message is littleendian)
+    ///
+    /// # Panics
+    /// - Panics if `postcard::to_allocvec(message)` fails to serialize the message.
     fn extend_serialize_as_le(&mut self, message: &(impl serde::Serialize + ?Sized)) {
         self.extend_as_le_from_refs([postcard::to_allocvec(message).unwrap().as_slice()]);
     }
     /// Appends a type that implements [`ark_serialize::CanonicalSerialize`] by appending the raw bytes (i.e. assuming the message is littleendian)
+    ///
+    /// # Panics
+    /// - Panics if `message.serialize_compressed(&mut buf)` fails to serialize the message.
     fn extend_canonical_serialize_as_le(
         &mut self,
         message: &(impl ark_serialize::CanonicalSerialize + ?Sized),
diff --git a/crates/proof-of-sql/src/base/proof/transcript_core.rs b/crates/proof-of-sql/src/base/proof/transcript_core.rs
index cae1709ca..08e7b73ae 100644
--- a/crates/proof-of-sql/src/base/proof/transcript_core.rs
+++ b/crates/proof-of-sql/src/base/proof/transcript_core.rs
@@ -15,6 +15,9 @@ pub(super) trait TranscriptCore {
 }
 
 /// private method to facilitate recieving challenges and reversing them. Undefined behavior if the size of `M` is not 32 bytes.
+///
+/// # Panics
+/// - Panics if `M::read_from(&bytes)` fails to read the bytes into the type `M`.
 fn receive_challenge_as_be<M: zerocopy::FromBytes>(slf: &mut impl TranscriptCore) -> M {
     debug_assert_eq!(32, core::mem::size_of::<M>());
     let mut bytes = slf.raw_challenge();
diff --git a/crates/proof-of-sql/src/base/scalar/mont_scalar.rs b/crates/proof-of-sql/src/base/scalar/mont_scalar.rs
index dc3a3a997..fafdad028 100644
--- a/crates/proof-of-sql/src/base/scalar/mont_scalar.rs
+++ b/crates/proof-of-sql/src/base/scalar/mont_scalar.rs
@@ -129,6 +129,12 @@ impl<T: MontConfig<4>> MontScalar<T> {
         Self(value)
     }
     /// Create a new `MontScalar` from a `[u64, 4]`. The array is expected to be in non-montgomery form.
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if the provided `[u64; 4]` cannot be converted into a valid `BigInt` due to an overflow or invalid input.
+    /// The method unwraps the result of `Fp::from_bigint`, which will panic if the `BigInt` does not represent a valid field element.
+    /// ("Invalid input" refers to an integer outside the valid range `[0, p-1]` for the prime field, i.e. one that cannot be represented as a canonical field element; it can also occur due to overflow or issues in the conversion process.)
     pub fn from_bigint(vals: [u64; 4]) -> Self {
         Self(Fp::from_bigint(ark_ff::BigInt(vals)).unwrap())
     }
@@ -275,6 +279,10 @@ impl From<Curve25519Scalar> for curve25519_dalek::scalar::Scalar {
     }
 }
 
 impl From<&Curve25519Scalar> for curve25519_dalek::scalar::Scalar {
+    ///
+    /// # Panics
+    ///
+    /// This method will panic if the byte array is not of the expected length (32 bytes) or if it cannot be converted to a valid canonical scalar. However, under normal conditions, valid `Curve25519Scalar` values should always satisfy these requirements.
     fn from(value: &Curve25519Scalar) -> Self {
         let bytes = ark_ff::BigInteger::to_bytes_le(&value.0.into_bigint());
         curve25519_dalek::scalar::Scalar::from_canonical_bytes(bytes.try_into().unwrap()).unwrap()
diff --git a/crates/proof-of-sql/src/base/slice_ops/batch_inverse.rs b/crates/proof-of-sql/src/base/slice_ops/batch_inverse.rs
index 9aa71dcd1..75e010bfa 100644
--- a/crates/proof-of-sql/src/base/slice_ops/batch_inverse.rs
+++ b/crates/proof-of-sql/src/base/slice_ops/batch_inverse.rs
@@ -24,6 +24,10 @@ use rayon::prelude::*;
 /// Given a vector of field elements `{v_i}`, compute the vector `{v_i^(-1)}` using Montgomery's trick.
 /// The vector is modified in place.
 /// Any zero elements in the vector are left unchanged.
+///
+/// # Panics
+/// - Panics if the inversion of `tmp` fails, which can happen only if `tmp` is zero,
+///   although `tmp` is guaranteed to be non-zero by the preceding logic.
 pub fn batch_inversion<F>(v: &mut [F])
 where
     F: One + Zero + MulAssign + Inv<Output = Option<F>> + Mul<Output = F> + Send + Sync + Copy,
@@ -54,6 +58,10 @@ where
     );
 }
 
+/// # Panics
+/// * This function panics if the inversion operation (`inv()`) fails, which can only
+///   happen on a zero element. Zero elements are skipped during accumulation, however,
+///   so this unwrap is guaranteed to succeed.
 fn serial_batch_inversion_and_mul<F>(v: &mut [F], coeff: F)
 where
     F: One + Zero + MulAssign + Inv<Output = Option<F>> + Mul<Output = F> + Copy,
diff --git a/crates/proof-of-sql/src/base/slice_ops/batch_inverse_test.rs b/crates/proof-of-sql/src/base/slice_ops/batch_inverse_test.rs
index 817876e9f..53283aae7 100644
--- a/crates/proof-of-sql/src/base/slice_ops/batch_inverse_test.rs
+++ b/crates/proof-of-sql/src/base/slice_ops/batch_inverse_test.rs
@@ -17,7 +17,6 @@ fn we_can_pseudo_invert_arrays_of_length_1_with_non_zero() {
     assert_eq!(res.len(), input.len());
     res.copy_from_slice(&input[..]);
     slice_ops::batch_inversion(&mut res[..]);
-
     assert!(res == vec![input[0].inv().unwrap()]);
 }
diff --git a/crates/proof-of-sql/src/base/slice_ops/mul_add_assign.rs b/crates/proof-of-sql/src/base/slice_ops/mul_add_assign.rs
index ff020a9e2..c9c019288 100644
--- a/crates/proof-of-sql/src/base/slice_ops/mul_add_assign.rs
+++ b/crates/proof-of-sql/src/base/slice_ops/mul_add_assign.rs
@@ -5,7 +5,11 @@ use rayon::iter::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator};
 
 /// This operation does `result[i] += multiplier * to_mul_add[i]` for `i` in `0..to_mul_add.len()`.
 ///
-/// It panics if `result.len() < to_mul_add.len()`.
+/// # Panics
+/// Panics if the length of `result` is less than the length of `to_mul_add`.
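+///
+/// For example (illustrative sketch, not part of this change): with `result = [1, 2]`,
+/// `multiplier = 10`, and `to_mul_add = [3, 4]`, `result` becomes `[31, 42]`.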
 pub fn mul_add_assign<T, S>(result: &mut [T], multiplier: T, to_mul_add: &[S])
 where
     T: Send + Sync + Mul<Output = T> + AddAssign + Copy,
diff --git a/crates/proof-of-sql/src/lib.rs b/crates/proof-of-sql/src/lib.rs
index a2d313f71..d16905bdd 100644
--- a/crates/proof-of-sql/src/lib.rs
+++ b/crates/proof-of-sql/src/lib.rs
@@ -1,6 +1,8 @@
+#![cfg_attr(test, allow(clippy::missing_panics_doc))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]
-#![allow(clippy::missing_panics_doc, clippy::module_name_repetitions)]
+#![allow(clippy::module_name_repetitions)]
+
 extern crate alloc;
 
 pub mod base;
diff --git a/crates/proof-of-sql/src/proof_primitive/dory/dory_commitment_helper_cpu.rs b/crates/proof-of-sql/src/proof_primitive/dory/dory_commitment_helper_cpu.rs
index 1d103421a..0ab4403b9 100644
--- a/crates/proof-of-sql/src/proof_primitive/dory/dory_commitment_helper_cpu.rs
+++ b/crates/proof-of-sql/src/proof_primitive/dory/dory_commitment_helper_cpu.rs
@@ -5,6 +5,13 @@ use ark_ec::VariableBaseMSM;
 use core::iter::once;
 
 #[tracing::instrument(name = "compute_dory_commitment_impl (cpu)", level = "debug", skip_all)]
+/// # Panics
+///
+/// Will panic if:
+/// - `Gamma_1.last()` returns `None` when computing the first row commitment.
+/// - `Gamma_1.last()` returns `None` when computing remaining row commitments.
+/// - `Gamma_2.last()` returns `None` when computing the commitment for the entire matrix.
+/// - The slices accessed in `Gamma_1.last().unwrap()` or `Gamma_2.last().unwrap()` are out of bounds.
 fn compute_dory_commitment_impl<'a, T>(
     column: &'a [T],
     offset: usize,
diff --git a/crates/proof-of-sql/src/proof_primitive/dory/dory_commitment_helper_gpu.rs b/crates/proof-of-sql/src/proof_primitive/dory/dory_commitment_helper_gpu.rs
index 2bf45716c..9730dcc15 100644
--- a/crates/proof-of-sql/src/proof_primitive/dory/dory_commitment_helper_gpu.rs
+++ b/crates/proof-of-sql/src/proof_primitive/dory/dory_commitment_helper_gpu.rs
@@ -10,6 +10,11 @@ use tracing::{span, Level};
     level = "debug",
     skip_all
 )]
+/// # Panics
+///
+/// Will panic if:
+/// - `Gamma_2.last()` returns `None` during the computation of the gamma_2 slice.
+/// - The slice indexing in `gamma_2.last().unwrap()` is out of bounds, which can happen if `gamma_2_offset + num_sub_commits` exceeds the length of `Gamma_2`.
 fn compute_dory_commitments_packed_impl(
     committable_columns: &[CommittableColumn],
     offset: usize,
diff --git a/crates/proof-of-sql/src/proof_primitive/dory/dory_messages.rs b/crates/proof-of-sql/src/proof_primitive/dory/dory_messages.rs
index 9adff61ad..42acfcd6e 100644
--- a/crates/proof-of-sql/src/proof_primitive/dory/dory_messages.rs
+++ b/crates/proof-of-sql/src/proof_primitive/dory/dory_messages.rs
@@ -57,12 +57,20 @@ impl DoryMessages {
         self.GT_messages.insert(0, message);
     }
     /// Pops a field element from the verifier's queue, and appends it to the transcript.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if there are no messages in the queue (i.e., `F_messages` is empty), indicating that the prover attempted to receive a message that was never sent.
     pub(super) fn prover_recieve_F_message(&mut self, transcript: &mut impl Transcript) -> F {
         let message = self.F_messages.pop().unwrap();
         transcript.extend_canonical_serialize_as_le(&message);
         message
     }
     /// Pops a G1 element from the verifier's queue, and appends it to the transcript.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if there are no messages in the queue (i.e., `G1_messages` is empty), indicating that the prover attempted to receive a message that was never sent.
     pub(super) fn prover_recieve_G1_message(
         &mut self,
         transcript: &mut impl Transcript,
@@ -72,6 +80,10 @@ impl DoryMessages {
         message
     }
     /// Pops a G2 element from the verifier's queue, and appends it to the transcript.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if there are no messages in the queue (i.e., `G2_messages` is empty), indicating that the prover attempted to receive a message that was never sent.
     pub(super) fn prover_recieve_G2_message(
         &mut self,
         transcript: &mut impl Transcript,
@@ -81,6 +93,10 @@ impl DoryMessages {
         message
     }
     /// Pops a GT element from the verifier's queue, and appends it to the transcript.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if there are no messages in the queue (i.e., `GT_messages` is empty), indicating that the prover attempted to receive a message that was never sent.
     pub(super) fn prover_recieve_GT_message(&mut self, transcript: &mut impl Transcript) -> GT {
         let message = self.GT_messages.pop().unwrap();
         transcript.extend_canonical_serialize_as_le(&message);
@@ -90,6 +106,10 @@ impl DoryMessages {
     /// This message is produces as a challenge from the transcript.
     ///
     /// While the message is a simple field element, we ensure that it is non-zero, and also return it's inverse.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if the challenge process fails to generate a non-zero message, indicating a logical issue in the protocol's challenge generation.
     pub(super) fn verifier_F_message(&mut self, transcript: &mut impl Transcript) -> (F, F) {
         let mut message = F::zero();
         while message.is_zero() {
diff --git a/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_commitment_helper_cpu.rs b/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_commitment_helper_cpu.rs
index 49c6b224f..6b2a7a93b 100644
--- a/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_commitment_helper_cpu.rs
+++ b/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_commitment_helper_cpu.rs
@@ -6,6 +6,12 @@ use crate::base::commitment::CommittableColumn;
 use alloc::{vec, vec::Vec};
 
 #[tracing::instrument(name = "compute_dory_commitment_impl (cpu)", level = "debug", skip_all)]
+/// # Panics
+///
+/// Will panic if:
+/// - `setup.Gamma_1.last()` returns `None`, indicating that `Gamma_1` is empty.
+/// - `setup.Gamma_2.last()` returns `None`, indicating that `Gamma_2` is empty.
+/// - The indexing for `Gamma_2` with `first_row..=last_row` goes out of bounds.
 fn compute_dory_commitment_impl<'a, T>(
     column: &'a [T],
     offset: usize,
diff --git a/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_helper.rs b/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_helper.rs
index 39d822a68..a93bd6130 100644
--- a/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_helper.rs
+++ b/crates/proof-of-sql/src/proof_primitive/dory/dynamic_dory_helper.rs
@@ -51,6 +51,9 @@ pub(super) fn compute_dynamic_nu(num_vars: usize) -> usize {
 
 /// Compute the hi and lo vectors (or L and R) that are derived from `point`.
 /// L and R are the vectors such that LMR is exactly the evaluation of `a` at the point `point`.
+/// # Panics
+/// This function requires that `point` has length at least as big as the number of rows in `M` that is created by `a`.
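+/// Both returned vectors have length `1 << nu`, where `nu = point.len() / 2 + 1` (see the body below).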
pub(super) fn compute_dynamic_vecs(point: &[F]) -> (Vec<F>, Vec<F>) { let nu = point.len() / 2 + 1; let mut lo_vec = vec![F::ZERO; 1 << nu]; @@ -74,6 +76,8 @@ pub(super) fn compute_dynamic_vecs(point: &[F]) -> (Vec<F>, Vec<F>) { /// /// This is the analogous function of the non-dynamic folding function [`extended_dory_reduce_verify_fold_s_vecs`](super::extended_dory_reduce_helper::extended_dory_reduce_verify_fold_s_vecs). /// See that method for more details. +/// # Panics +/// This function requires that `point` has length at least as big as the number of rows in `M` that is created by `a`. In practice, `point` is normally length `1 << nu`. pub(super) fn fold_dynamic_tensors(state: &ExtendedVerifierState) -> (F, F) { let point = &state.s1_tensor; let nu = point.len() / 2 + 1; diff --git a/crates/proof-of-sql/src/proof_primitive/dory/fold_scalars.rs b/crates/proof-of-sql/src/proof_primitive/dory/fold_scalars.rs index 1ffbcdb88..0769ff6ea 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/fold_scalars.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/fold_scalars.rs @@ -8,6 +8,7 @@ use crate::base::proof::Transcript; /// This is the prover side of the Fold-Scalars algorithm in section 4.1 of <https://eprint.iacr.org/2020/1274.pdf>. /// /// Note: this only works for nu = 0. +#[allow(clippy::missing_panics_doc)] pub fn fold_scalars_0_prove( messages: &mut DoryMessages, transcript: &mut impl Transcript, diff --git a/crates/proof-of-sql/src/proof_primitive/dory/pack_scalars.rs b/crates/proof-of-sql/src/proof_primitive/dory/pack_scalars.rs index bc86100f3..1ce4d4fce 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/pack_scalars.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/pack_scalars.rs @@ -186,6 +186,7 @@ fn pack_bit>( /// * `offset` - The offset to the data. /// * `num_matrix_commitment_columns` - The number of generators used for msm. /// * `buffer` - Pre-allocated offset column buffer. +#[allow(clippy::missing_panics_doc)] fn offset_column( committable_columns: &[CommittableColumn], offset: usize, diff --git a/crates/proof-of-sql/src/proof_primitive/dory/pairings.rs b/crates/proof-of-sql/src/proof_primitive/dory/pairings.rs index fd3f9128a..50c86a381 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/pairings.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/pairings.rs @@ -61,6 +61,8 @@ pub fn multi_pairing_4( ) { multi_pairing_4_impl((a0, b0), (a1, b1), (a2, b2), (a3, b3)) } +/// # Panics +/// This function may panic if the final exponentiation fails due to invalid inputs, or if the multi-pairing operation encounters an error with the provided elements. fn multi_pairing_impl( a: impl IntoIterator + Send>, b: impl IntoIterator + Send>, diff --git a/crates/proof-of-sql/src/proof_primitive/dory/scalar_product.rs b/crates/proof-of-sql/src/proof_primitive/dory/scalar_product.rs index a6101e71e..e8f2534f6 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/scalar_product.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/scalar_product.rs @@ -2,7 +2,8 @@ use super::{pairings, DoryMessages, ProverState, VerifierSetup, VerifierState}; use crate::base::proof::Transcript; -/// This is the prover side of the Scalar-Product algorithm in section 3.1 of <https://eprint.iacr.org/2020/1274.pdf>. +/// This is the prover side of the Scalar-Product algorithm in section 3.1 of https://eprint.iacr.org/2020/1274.pdf.
+#[allow(clippy::missing_panics_doc)] pub fn scalar_product_prove( messages: &mut DoryMessages, transcript: &mut impl Transcript, diff --git a/crates/proof-of-sql/src/proof_primitive/dory/setup.rs b/crates/proof-of-sql/src/proof_primitive/dory/setup.rs index 11479256a..a516d90b8 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/setup.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/setup.rs @@ -35,6 +35,8 @@ pub struct ProverSetup<'a> { impl<'a> ProverSetup<'a> { /// Create a new `ProverSetup` from the public parameters. + /// # Panics + /// Panics if the length of `Gamma_1` or `Gamma_2` is not equal to `2^max_nu`. pub(super) fn new( Gamma_1: &'a [G1Affine], Gamma_2: &'a [G2Affine], @@ -141,6 +143,9 @@ impl_serde_for_ark_serde_unchecked!(VerifierSetup); impl VerifierSetup { /// Create a new `VerifierSetup` from the public parameters. + /// # Panics + /// Panics if the length of `Gamma_1_nu` is not equal to `2^max_nu`. + /// Panics if the length of `Gamma_2_nu` is not equal to `2^max_nu`. pub(super) fn new( Gamma_1_nu: &[G1Affine], Gamma_2_nu: &[G2Affine], diff --git a/crates/proof-of-sql/src/proof_primitive/dory/state.rs b/crates/proof-of-sql/src/proof_primitive/dory/state.rs index 25e7573a0..633db8c77 100644 --- a/crates/proof-of-sql/src/proof_primitive/dory/state.rs +++ b/crates/proof-of-sql/src/proof_primitive/dory/state.rs @@ -19,6 +19,9 @@ pub struct ProverState { impl ProverState { /// Create a new `ProverState` from the witness. + /// # Panics + /// Panics if the length of `v1` is not equal to `2^nu`. + /// Panics if the length of `v2` is not equal to `2^nu`. pub fn new(v1: Vec<G1Affine>, v2: Vec<G2Affine>, nu: usize) -> Self { assert_eq!(v1.len(), 1 << nu); assert_eq!(v2.len(), 1 << nu); diff --git a/crates/proof-of-sql/src/proof_primitive/sumcheck/prover_round.rs b/crates/proof-of-sql/src/proof_primitive/sumcheck/prover_round.rs index 9ae1d8e7c..6f5f4310c 100644 --- a/crates/proof-of-sql/src/proof_primitive/sumcheck/prover_round.rs +++ b/crates/proof-of-sql/src/proof_primitive/sumcheck/prover_round.rs @@ -106,6 +106,8 @@ pub fn prove_round(prover_state: &mut ProverState, r_maybe: &Optio /// `ark_impl`: `multiplicand.ark_impl.fix_variables(&[r_as_field])`, /// }; /// Only it does it in place +/// # Panics +/// Panics if `num_vars` is zero, indicating an invalid size of the partial point. fn in_place_fix_variable(multiplicand: &mut [S], r_as_field: S, num_vars: usize) { assert!(num_vars > 0, "invalid size of partial point"); for b in 0..(1 << num_vars) { diff --git a/crates/proof-of-sql/src/sql/parse/filter_exec_builder.rs b/crates/proof-of-sql/src/sql/parse/filter_exec_builder.rs index b7a85405b..2374695cc 100644 --- a/crates/proof-of-sql/src/sql/parse/filter_exec_builder.rs +++ b/crates/proof-of-sql/src/sql/parse/filter_exec_builder.rs @@ -45,6 +45,10 @@ impl FilterExecBuilder { Ok(self) } + /// # Panics + /// + /// Will panic if: + /// - `self.column_mapping.get(alias)` returns `None`, which can occur if the alias is not found in the column mapping.
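+ ///
+ /// Hypothetical call-site shape (the builder's construction and its arguments are assumptions,
+ /// not shown in this diff):
+ /// ```ignore
+ /// let exec = builder.add_result_columns(&enriched_exprs).build();
+ /// ```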
pub fn add_result_columns(mut self, columns: &[EnrichedExpr]) -> Self { // If a column is provable, add it to the filter result expression list // If at least one column is non-provable, add all columns from the column mapping to the filter result expression list @@ -59,6 +63,7 @@ impl FilterExecBuilder { has_nonprovable_column = true; } } + if has_nonprovable_column { // Has to keep them sorted to have deterministic order for tests for alias in self.column_mapping.keys().sorted() { @@ -72,6 +77,7 @@ impl FilterExecBuilder { self } + #[allow(clippy::missing_panics_doc)] pub fn build(self) -> FilterExec { FilterExec::new( self.filter_result_expr_list, diff --git a/crates/proof-of-sql/src/sql/parse/query_context.rs b/crates/proof-of-sql/src/sql/parse/query_context.rs index 401dc73e3..302a57be6 100644 --- a/crates/proof-of-sql/src/sql/parse/query_context.rs +++ b/crates/proof-of-sql/src/sql/parse/query_context.rs @@ -35,11 +35,13 @@ pub struct QueryContext { } impl QueryContext { + #[allow(clippy::missing_panics_doc)] pub fn set_table_ref(&mut self, table: TableRef) { assert!(self.table.is_none()); self.table = Some(table); } + #[allow(clippy::missing_panics_doc)] pub fn get_table_ref(&self) -> &TableRef { self.table .as_ref() @@ -66,6 +68,7 @@ impl QueryContext { self.in_result_scope } + #[allow(clippy::missing_panics_doc)] pub fn set_in_agg_scope(&mut self, in_agg_scope: bool) -> ConversionResult<()> { if !in_agg_scope { assert!( @@ -114,6 +117,7 @@ impl QueryContext { } } + #[allow(clippy::missing_panics_doc)] pub fn push_aliased_result_expr(&mut self, expr: AliasedResultExpr) -> ConversionResult<()> { assert!(&self.has_visited_group_by, "Group by must be visited first"); self.res_aliased_exprs.push(expr); @@ -153,6 +157,10 @@ impl QueryContext { }) } + /// # Panics + /// + /// Will panic if: + /// - `self.res_aliased_exprs` is empty, triggering the assertion `assert!(!self.res_aliased_exprs.is_empty(), "empty aliased exprs")`. pub fn get_aliased_result_exprs(&self) -> ConversionResult<&[AliasedResultExpr]> { assert!(!self.res_aliased_exprs.is_empty(), "empty aliased exprs"); diff --git a/crates/proof-of-sql/src/sql/parse/query_context_builder.rs b/crates/proof-of-sql/src/sql/parse/query_context_builder.rs index 20883f757..819bbbec6 100644 --- a/crates/proof-of-sql/src/sql/parse/query_context_builder.rs +++ b/crates/proof-of-sql/src/sql/parse/query_context_builder.rs @@ -29,7 +29,7 @@ impl<'a> QueryContextBuilder<'a> { } } - #[allow(clippy::vec_box)] + #[allow(clippy::vec_box, clippy::missing_panics_doc)] pub fn visit_table_expr( mut self, table_expr: Vec<Box<TableExpression>>, @@ -102,6 +102,10 @@ impl<'a> QueryContextBuilder<'a> { // Private interface impl<'a> QueryContextBuilder<'a> { + #[allow( + clippy::missing_panics_doc, + reason = "The assertion ensures there is at least one column, and this is a fundamental requirement for schema retrieval." + )] fn lookup_schema(&self) -> Vec<(Identifier, ColumnType)> { let table_ref = self.context.get_table_ref(); let columns = self.schema_accessor.lookup_schema(*table_ref); @@ -135,6 +139,8 @@ impl<'a> QueryContextBuilder<'a> { } } + /// # Panics + /// Panics if the expression is not a column expression.
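+ ///
+ /// Illustrative (hypothetical values): a bare column reference is accepted, anything
+ /// else hits the panicking fallback arm.
+ /// ```ignore
+ /// let ty = self.visit_column_expr(&Expression::Column(id))?; // ok
+ /// // self.visit_column_expr(&some_literal_expr)              // would panic
+ /// ```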
fn visit_column_expr(&mut self, expr: &Expression) -> ConversionResult { let identifier = match expr { Expression::Column(identifier) => *identifier, diff --git a/crates/proof-of-sql/src/sql/parse/query_expr_tests.rs b/crates/proof-of-sql/src/sql/parse/query_expr_tests.rs index e80d9dee4..8ddbcb3cb 100644 --- a/crates/proof-of-sql/src/sql/parse/query_expr_tests.rs +++ b/crates/proof-of-sql/src/sql/parse/query_expr_tests.rs @@ -23,6 +23,11 @@ use proof_of_sql_parser::{ Identifier, }; +/// # Panics +/// +/// Will panic if: +/// - The `parse` method of `SelectStatementParser` fails, causing `unwrap()` to panic. +/// - The `try_new` method of `QueryExpr` fails, causing `unwrap()` to panic. fn query_to_provable_ast( table: TableRef, query: &str, diff --git a/crates/proof-of-sql/src/sql/parse/where_expr_builder_tests.rs b/crates/proof-of-sql/src/sql/parse/where_expr_builder_tests.rs index e7ec19691..e3f967f0a 100644 --- a/crates/proof-of-sql/src/sql/parse/where_expr_builder_tests.rs +++ b/crates/proof-of-sql/src/sql/parse/where_expr_builder_tests.rs @@ -18,6 +18,15 @@ use proof_of_sql_parser::{ Identifier, SelectStatement, }; +/// # Panics +/// +/// Will panic if: +/// - The parsing of the table reference `"sxt.sxt_tab"` fails, which would occur if the input +/// string does not adhere to the expected format for identifiers. This is because `parse()` +/// is called on the identifier string and `unwrap()` is used to handle the result. +/// - Creating the `Precision` for the `Decimal75` column type fails. The `Precision::new(7)` +/// call is expected to succeed; if it were given an invalid precision value, it would +/// cause a panic when `unwrap()` is called. fn get_column_mappings_for_testing() -> IndexMap<Identifier, ColumnRef> { let tab_ref = "sxt.sxt_tab".parse().unwrap(); let mut column_mapping = IndexMap::default(); diff --git a/crates/proof-of-sql/src/sql/postprocessing/group_by_postprocessing.rs b/crates/proof-of-sql/src/sql/postprocessing/group_by_postprocessing.rs index 32e162dca..1650b39bb 100644 --- a/crates/proof-of-sql/src/sql/postprocessing/group_by_postprocessing.rs +++ b/crates/proof-of-sql/src/sql/postprocessing/group_by_postprocessing.rs @@ -64,6 +64,10 @@ fn get_free_identifiers_from_expr(expr: &Expression) -> IndexSet<Identifier> { /// The idea here is to recursively traverse the expression tree and collect all the aggregation expressions /// and then label them as new columns post-aggregation and replace them with these new columns so that /// the post-aggregation expression tree doesn't contain any aggregation expressions and can be simply evaluated. +/// # Panics +/// +/// Will panic if the key for an aggregation expression cannot be parsed as a valid identifier +/// or if there are issues retrieving an identifier from the map. fn get_aggregate_and_remainder_expressions( expr: Expression, aggregation_expr_map: &mut IndexMap<(AggregationOperator, Expression), Identifier>, @@ -104,6 +108,9 @@ fn get_aggregate_and_remainder_expressions( } /// Given an `AliasedResultExpr`, check if it is legitimate and if so grab the relevant aggregation expression +/// # Panics +/// +/// Will panic if there is an issue retrieving the first element from the difference of free identifiers and group-by identifiers, indicating a logical inconsistency in the identifiers.
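+///
+/// Illustrative failure mode (hypothetical query): with `GROUP BY a`, a result expression
+/// such as `b + 1` is not inside any aggregation and references `b`, which lies outside the
+/// group-by set; fetching that offending identifier from the set difference is where the
+/// panicking lookup occurs.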
fn check_and_get_aggregation_and_remainder( expr: AliasedResultExpr, group_by_identifiers: &[Identifier], diff --git a/crates/proof-of-sql/src/sql/postprocessing/test_utility.rs b/crates/proof-of-sql/src/sql/postprocessing/test_utility.rs index 9519a6e68..71608ec48 100644 --- a/crates/proof-of-sql/src/sql/postprocessing/test_utility.rs +++ b/crates/proof-of-sql/src/sql/postprocessing/test_utility.rs @@ -15,6 +15,10 @@ pub fn group_by_postprocessing( ) } +/// +/// # Panics +/// +/// This function may panic if the internal structures cannot be created properly, although this is unlikely under normal circumstances. pub fn select_expr(result_exprs: &[AliasedResultExpr]) -> OwnedTablePostprocessing { OwnedTablePostprocessing::new_select(SelectPostprocessing::new(result_exprs.to_vec())) } diff --git a/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder.rs b/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder.rs index b0adbd577..c5e6878df 100644 --- a/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder.rs +++ b/crates/proof-of-sql/src/sql/proof/composite_polynomial_builder.rs @@ -21,6 +21,10 @@ pub struct CompositePolynomialBuilder { } impl CompositePolynomialBuilder { + #[allow( + clippy::missing_panics_doc, + reason = "The assertion ensures that the length of 'fr' does not exceed the allowable range based on 'num_sumcheck_variables', making the panic clear from context." + )] pub fn new(num_sumcheck_variables: usize, fr: &[S]) -> Self { assert!(1 << num_sumcheck_variables >= fr.len()); Self { @@ -58,6 +62,10 @@ impl CompositePolynomialBuilder { } /// Produce a polynomial term of the form /// mult * term1(X1, ..., Xr) * ... * termK(X1, ..., Xr) + #[allow( + clippy::missing_panics_doc, + reason = "The assertion guarantees that terms are not empty, which is inherently clear from the context of this function." + )] pub fn produce_zerosum_multiplicand( &mut self, mult: &S, diff --git a/crates/proof-of-sql/src/sql/proof/proof_builder.rs b/crates/proof-of-sql/src/sql/proof/proof_builder.rs index 5f9a52340..3b011aaa5 100644 --- a/crates/proof-of-sql/src/sql/proof/proof_builder.rs +++ b/crates/proof-of-sql/src/sql/proof/proof_builder.rs @@ -174,6 +174,9 @@ impl<'a, S: Scalar> ProofBuilder<'a, S> { /// Specifically, these are the challenges that the verifier sends to /// the prover after the prover sends the result, but before the prover /// sends commitments to the intermediate witness columns. + /// # Panics + /// + /// Will panic if there are no post-result challenges available to pop from the stack. pub fn consume_post_result_challenge(&mut self) -> S { self.post_result_challenges.pop().unwrap() } diff --git a/crates/proof-of-sql/src/sql/proof/provable_query_result.rs b/crates/proof-of-sql/src/sql/proof/provable_query_result.rs index bf2d0c93e..a59008e1e 100644 --- a/crates/proof-of-sql/src/sql/proof/provable_query_result.rs +++ b/crates/proof-of-sql/src/sql/proof/provable_query_result.rs @@ -77,8 +77,12 @@ impl ProvableQueryResult { } } /// Given an evaluation vector, compute the evaluation of the intermediate result /// columns as sparse multilinear extensions + /// + /// # Panics + /// This function will panic if the length of `evaluation_point` does not match `self.num_columns`. + /// It will also panic if the `data` array is not properly formatted for the expected column types.
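+ ///
+ /// Sketch of the precondition (illustrative only; the remaining arguments are elided here):
+ /// ```ignore
+ /// // `evaluation_point` must be sized consistently with the result table;
+ /// // a mismatch trips the assertions rather than returning an error.
+ /// let evals = provable_result.evaluate(&evaluation_point, /* ... */);
+ /// ```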
pub fn evaluate( &self, evaluation_point: &[S], @@ -136,6 +144,10 @@ impl ProvableQueryResult { Ok(res) } + #[allow( + clippy::missing_panics_doc, + reason = "Assertions ensure preconditions are met, eliminating the possibility of panic." + )] /// Convert the intermediate query result into a final query result /// /// The result is essentially an `OwnedTable` type. diff --git a/crates/proof-of-sql/src/sql/proof/query_proof_test.rs b/crates/proof-of-sql/src/sql/proof/query_proof_test.rs index 293f4b1ec..6460fbb9b 100644 --- a/crates/proof-of-sql/src/sql/proof/query_proof_test.rs +++ b/crates/proof-of-sql/src/sql/proof/query_proof_test.rs @@ -99,6 +99,10 @@ impl ProofPlan for TrivialTestProofPlan { ); Ok(vec![C::Scalar::ZERO]) } + /// + /// # Panics + /// + /// This method will panic if the `ColumnField` cannot be created from the provided column name (e.g., if the name parsing fails). fn get_column_result_fields(&self) -> Vec<ColumnField> { vec![ColumnField::new("a1".parse().unwrap(), ColumnType::BigInt)] } diff --git a/crates/proof-of-sql/src/sql/proof/sumcheck_mle_evaluations.rs b/crates/proof-of-sql/src/sql/proof/sumcheck_mle_evaluations.rs index 7880ade56..d03b265a2 100644 --- a/crates/proof-of-sql/src/sql/proof/sumcheck_mle_evaluations.rs +++ b/crates/proof-of-sql/src/sql/proof/sumcheck_mle_evaluations.rs @@ -31,6 +31,10 @@ pub struct SumcheckMleEvaluations<'a, S: Scalar> { pub pcs_proof_evaluations: &'a [S], } +#[allow( + clippy::missing_panics_doc, + reason = "Assertions ensure preconditions are met, eliminating the possibility of panic." +)] impl<'a, S: Scalar> SumcheckMleEvaluations<'a, S> { /// Constructs the evaluations for the sumcheck MLEs. /// diff --git a/crates/proof-of-sql/src/sql/proof/verifiable_query_result.rs b/crates/proof-of-sql/src/sql/proof/verifiable_query_result.rs index 5221f2db4..8a65d66e7 100644 --- a/crates/proof-of-sql/src/sql/proof/verifiable_query_result.rs +++ b/crates/proof-of-sql/src/sql/proof/verifiable_query_result.rs @@ -108,7 +108,12 @@ impl VerifiableQueryResult { /// Note: a verified result can still represent an error (e.g. overflow), but it is a verified /// error. /// /// Note: This does NOT transform the result! + /// # Panics + /// Panics if: + /// - `self.provable_result` is `None` but `self.proof` is `Some(_)`, or vice versa. + /// - `self.proof.as_ref().unwrap()` is called but `self.proof` is `None`. + /// - `self.provable_result.as_ref().unwrap()` is called but `self.provable_result` is `None`. pub fn verify( &self, expr: &(impl ProofPlan + Serialize), @@ -136,7 +141,6 @@ impl VerifiableQueryResult { error: "non-zero sumcheck variables but empty result", })?; } - self.proof.as_ref().unwrap().verify( expr, accessor, diff --git a/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs b/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs index d32d484de..a2763477b 100644 --- a/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs +++ b/crates/proof-of-sql/src/sql/proof/verifiable_query_result_test_utility.rs @@ -19,6 +19,14 @@ use serde::Serialize; /// verification fails. /// /// It's useful as a tool for testing proof code. +/// +/// # Panics +/// +/// Will panic if: +/// - The verification of `res` does not succeed, causing the assertion `assert!(res.verify(...).is_ok())` to fail. +/// - `res.proof` is `None`, causing `res.proof.as_ref().unwrap()` to panic.
+/// - Attempting to modify `pcs_proof_evaluations` or `commitments` if `res_p.proof` is `None`, leading to a panic on `unwrap()`. +/// - `fake_accessor.update_offset` itself panics in its implementation. pub fn exercise_verification( res: &VerifiableQueryResult, expr: &(impl ProofPlan + Serialize), @@ -113,6 +121,15 @@ fn tamper_empty_result( assert!(res_p.verify(expr, accessor, &()).is_err()); } +/// # Panics +/// +/// Will panic if: +/// - `res.provable_result` is `None`, which causes the subsequent `unwrap()` call on it +/// to panic. +/// - `provable_res.indexes()` returns an empty vector, which leads to attempting to modify an +/// index of an empty result, producing an invalid state. +/// - The assertion `assert!(res_p.verify(expr, accessor, &()).is_err())` fails, indicating that the +/// verification did not fail as expected after tampering. fn tamper_result( res: &VerifiableQueryResult, expr: &(impl ProofPlan + Serialize), diff --git a/crates/proof-of-sql/src/sql/proof/verification_builder.rs b/crates/proof-of-sql/src/sql/proof/verification_builder.rs index 12bd27d17..532914584 100644 --- a/crates/proof-of-sql/src/sql/proof/verification_builder.rs +++ b/crates/proof-of-sql/src/sql/proof/verification_builder.rs @@ -28,6 +28,10 @@ pub struct VerificationBuilder<'a, C: Commitment> { } impl<'a, C: Commitment> VerificationBuilder<'a, C> { + #[allow( + clippy::missing_panics_doc, + reason = "The only possible panic is from the assertion comparing lengths, which is clear from context." + )] pub fn new( generator_offset: usize, mle_evaluations: SumcheckMleEvaluations<'a, C::Scalar>, @@ -112,24 +116,41 @@ impl<'a, C: Commitment> VerificationBuilder<'a, C> { self.produced_subpolynomials += 1; } + #[allow( + clippy::missing_panics_doc, + reason = "The panic condition is clear due to the assertion that checks if the computation is completed." + )] /// Get the evaluation of the sumcheck polynomial at its randomly selected point pub fn sumcheck_evaluation(&self) -> C::Scalar { assert!(self.completed()); self.sumcheck_evaluation } + #[allow( + clippy::missing_panics_doc, + reason = "Panic conditions are straightforward as they rely on the completion assertion." + )] /// Get the commitments of pre-result MLE vectors used in a verifiable query's /// bulletproof pub fn pcs_proof_commitments(&self) -> &[C] { assert!(self.completed()); &self.pcs_proof_commitments } + + #[allow( + clippy::missing_panics_doc, + reason = "Panic conditions are self-evident from the completed assertion." + )] /// Get folding factors for the pre-result commitments pub fn inner_product_multipliers(&self) -> &[C::Scalar] { assert!(self.completed()); self.inner_product_multipliers } + #[allow( + clippy::missing_panics_doc, + reason = "The panic condition is evident due to the assertion ensuring completion." + )] /// Get the evaluation of the folded pre-result MLE vectors used in a verifiable query's /// bulletproof pub fn folded_pcs_proof_evaluation(&self) -> C::Scalar { @@ -152,6 +173,9 @@ impl<'a, C: Commitment> VerificationBuilder<'a, C> { /// Specifically, these are the challenges that the verifier sends to /// the prover after the prover sends the result, but before the prover /// sends commitments to the intermediate witness columns. + /// + /// # Panics + /// This function will panic if `post_result_challenges` is empty, i.e. there are no post-result challenges left to pop from the stack.
pub(super) fn consume_post_result_challenge(&mut self) -> C::Scalar { self.post_result_challenges.pop().unwrap() } diff --git a/crates/proof-of-sql/src/sql/proof_exprs/bitwise_verification.rs b/crates/proof-of-sql/src/sql/proof_exprs/bitwise_verification.rs index f8fb749f1..37d0d719c 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/bitwise_verification.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/bitwise_verification.rs @@ -18,6 +18,10 @@ pub fn is_within_acceptable_range(dist: &BitDistribution) -> bool { dist.most_significant_abs_bit() <= 128 } +#[allow( + clippy::missing_panics_doc, + reason = "All assertions check for validity within the context, ensuring no panic can occur" +)] /// Given a bit distribution for a column of data with a constant sign, the evaluation of a column /// of ones, the constant column's evaluation, and the evaluation of varying absolute bits, verify /// that the bit distribution is correct. @@ -51,6 +55,10 @@ pub fn verify_constant_sign_decomposition( } } +#[allow( + clippy::missing_panics_doc, + reason = "The assertion checks ensure that conditions are valid, preventing panics" +)] pub fn verify_constant_abs_decomposition( dist: &BitDistribution, eval: S, diff --git a/crates/proof-of-sql/src/sql/proof_exprs/comparison_util.rs b/crates/proof-of-sql/src/sql/proof_exprs/comparison_util.rs index 89b60b9b6..b95994d9d 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/comparison_util.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/comparison_util.rs @@ -30,6 +30,10 @@ fn unchecked_subtract_impl<'a, S: Scalar>( Ok(result) } +#[allow( + clippy::missing_panics_doc, + reason = "precision and scale are validated prior to calling this function, ensuring no panic occurs" +)] /// Scale LHS and RHS to the same scale if at least one of them is decimal /// and take the difference. This function is used for comparisons. pub(crate) fn scale_and_subtract<'a, S: Scalar>( diff --git a/crates/proof-of-sql/src/sql/proof_exprs/equals_expr.rs b/crates/proof-of-sql/src/sql/proof_exprs/equals_expr.rs index 9275f7231..a845076fd 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/equals_expr.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/equals_expr.rs @@ -91,6 +91,10 @@ impl ProofExpr for EqualsExpr { } } +#[allow( + clippy::missing_panics_doc, + reason = "table_length is guaranteed to match lhs.len()" +)] pub fn result_evaluate_equals_zero<'a, S: Scalar>( table_length: usize, alloc: &'a Bump, diff --git a/crates/proof-of-sql/src/sql/proof_exprs/numerical_util.rs b/crates/proof-of-sql/src/sql/proof_exprs/numerical_util.rs index f9a415c2e..305be291a 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/numerical_util.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/numerical_util.rs @@ -1,6 +1,10 @@ use crate::base::{database::Column, math::decimal::scale_scalar, scalar::Scalar}; use bumpalo::Bump; +#[allow( + clippy::missing_panics_doc, + reason = "lhs and rhs are guaranteed to have the same length by design, ensuring no panic occurs" +)] /// Add or subtract two columns together. pub(crate) fn add_subtract_columns<'a, S: Scalar>( lhs: Column<'a, S>, rhs: Column<'a, S>, @@ -30,6 +34,8 @@ pub(crate) fn add_subtract_columns<'a, S: Scalar>( } /// Multiply two columns together.
+/// # Panics
+/// Panics if the lengths of `lhs` and `rhs` are not equal, or if `lhs.scalar_at(i)` or `rhs.scalar_at(i)` returns `None`, which occurs if the column does not have a scalar at the given index `i`. pub(crate) fn multiply_columns<'a, S: Scalar>( lhs: &Column<'a, S>, rhs: &Column<'a, S>, @@ -46,6 +52,10 @@ pub(crate) fn multiply_columns<'a, S: Scalar>( }) } +#[allow( + clippy::missing_panics_doc, + reason = "scaling factor is guaranteed to not be negative based on input validation prior to calling this function" +)] /// The counterpart of `add_subtract_columns` for evaluating decimal expressions. pub(crate) fn scale_and_add_subtract_eval( lhs_eval: S, diff --git a/crates/proof-of-sql/src/sql/proof_exprs/or_expr.rs b/crates/proof-of-sql/src/sql/proof_exprs/or_expr.rs index c215a1bc6..efdeb3d5a 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/or_expr.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/or_expr.rs @@ -86,6 +86,10 @@ impl ProofExpr for OrExpr { } } +#[allow( + clippy::missing_panics_doc, + reason = "table_length matches lhs and rhs lengths, ensuring no panic occurs" +)] pub fn result_evaluate_or<'a>( table_length: usize, alloc: &'a Bump, @@ -97,6 +101,10 @@ pub fn result_evaluate_or<'a>( alloc.alloc_slice_fill_with(table_length, |i| lhs[i] || rhs[i]) } +#[allow( + clippy::missing_panics_doc, + reason = "lhs and rhs are guaranteed to have the same length, ensuring no panic occurs" +)] pub fn prover_evaluate_or<'a, S: Scalar>( builder: &mut ProofBuilder<'a, S>, alloc: &'a Bump, diff --git a/crates/proof-of-sql/src/sql/proof_exprs/sign_expr.rs b/crates/proof-of-sql/src/sql/proof_exprs/sign_expr.rs index e957c0b3d..6a8cc601f 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/sign_expr.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/sign_expr.rs @@ -39,6 +39,9 @@ pub fn count_sign(builder: &mut CountBuilder) -> Result<(), ProofError> { /// Compute the sign bit for a column of scalars. /// +/// # Panics
+/// Panics if `bits.last()` is `None` or if `result.len()` does not match `table_length`. +/// /// todo! make this more efficient and targeted at just the sign bit rather than all bits to create a proof pub fn result_evaluate_sign<'a, S: Scalar>( table_length: usize, @@ -67,6 +70,9 @@ pub fn result_evaluate_sign<'a, S: Scalar>( /// Prove the sign decomposition for a column of scalars. /// +/// # Panics
+/// Panics if `bits.last()` is `None`. +/// /// If x1, ..., xn denotes the data, prove the column of /// booleans, i.e. sign bits, s1, ..., sn where si == 1 if xi > MID and /// `si == 0` if `xi <= MID` and `MID` is defined in `base/bit/abs_bit_mask.rs` @@ -108,11 +114,15 @@ pub fn prover_evaluate_sign<'a, S: Scalar>( prove_bit_decomposition(builder, alloc, expr, &bits, &dist); } + // This might panic if `bits.last()` returns `None`. bits.last().unwrap() } /// Verify the sign decomposition for a column of scalars. /// +/// # Panics
+/// Panics if `bit_evals.last()` is `None`. +/// /// See [`prover_evaluate_sign`]. pub fn verifier_evaluate_sign( builder: &mut VerificationBuilder, @@ -188,6 +198,10 @@ fn verify_bits_are_binary( } } +/// # Panics
+/// Panics if `bits.last()` returns `None`.
+///
+/// This function generates subpolynomial terms for sumcheck, involving the scalar expression and its bit decomposition.
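+///
+/// Roughly (illustrative; sign handling and constant terms elided), the committed identity
+/// has the shape `expr = sum_j 2^j * bit_j`, folded into a single sumcheck subpolynomial
+/// over the evaluation domain.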
fn prove_bit_decomposition<'a, S: Scalar>( builder: &mut ProofBuilder<'a, S>, alloc: &'a Bump, @@ -221,6 +235,10 @@ fn prove_bit_decomposition<'a, S: Scalar>( builder.produce_sumcheck_subpolynomial(SumcheckSubpolynomialType::Identity, terms); } +/// # Panics +/// Panics if `bit_evals.last()` returns `None`. +/// +/// This function checks the consistency of the bit evaluations with the expression evaluation. fn verify_bit_decomposition<C: Commitment>( builder: &mut VerificationBuilder<'_, C>, expr_eval: C::Scalar, diff --git a/crates/proof-of-sql/src/sql/proof_exprs/test_utility.rs b/crates/proof-of-sql/src/sql/proof_exprs/test_utility.rs index 90d283f1f..5ea499294 100644 --- a/crates/proof-of-sql/src/sql/proof_exprs/test_utility.rs +++ b/crates/proof-of-sql/src/sql/proof_exprs/test_utility.rs @@ -6,12 +6,19 @@ use crate::base::{ }; use proof_of_sql_parser::intermediate_ast::AggregationOperator; +/// # Panics +/// Panics if: +/// - `name.parse()` fails, which means the provided string could not be parsed into the expected type (usually an `Identifier`). pub fn col_ref(tab: TableRef, name: &str, accessor: &impl SchemaAccessor) -> ColumnRef { let name = name.parse().unwrap(); let type_col = accessor.lookup_column(tab, name).unwrap(); ColumnRef::new(tab, name, type_col) } +/// # Panics +/// Panics if: +/// - `name.parse()` fails to parse the column name. +/// - `accessor.lookup_column()` returns `None`, indicating the column is not found. pub fn column( tab: TableRef, name: &str, @@ -22,38 +29,65 @@ pub fn column( DynProofExpr::Column(ColumnExpr::new(ColumnRef::new(tab, name, type_col))) } +/// # Panics +/// Panics if: +/// - `DynProofExpr::try_new_equals()` returns an error. pub fn equal(left: DynProofExpr, right: DynProofExpr) -> DynProofExpr { DynProofExpr::try_new_equals(left, right).unwrap() } +/// # Panics +/// Panics if: +/// - `DynProofExpr::try_new_inequality()` returns an error. pub fn lte(left: DynProofExpr, right: DynProofExpr) -> DynProofExpr { DynProofExpr::try_new_inequality(left, right, true).unwrap() } +/// # Panics +/// Panics if: +/// - `DynProofExpr::try_new_inequality()` returns an error. pub fn gte(left: DynProofExpr, right: DynProofExpr) -> DynProofExpr { DynProofExpr::try_new_inequality(left, right, false).unwrap() } +/// # Panics +/// Panics if: +/// - `DynProofExpr::try_new_not()` returns an error. pub fn not(expr: DynProofExpr) -> DynProofExpr { DynProofExpr::try_new_not(expr).unwrap() } +/// # Panics +/// Panics if: +/// - `DynProofExpr::try_new_and()` returns an error. pub fn and(left: DynProofExpr, right: DynProofExpr) -> DynProofExpr { DynProofExpr::try_new_and(left, right).unwrap() } +/// # Panics +/// Panics if: +/// - `DynProofExpr::try_new_or()` returns an error. pub fn or(left: DynProofExpr, right: DynProofExpr) -> DynProofExpr { DynProofExpr::try_new_or(left, right).unwrap() } +/// # Panics +/// Panics if: +/// - `DynProofExpr::try_new_add()` returns an error. pub fn add(left: DynProofExpr, right: DynProofExpr) -> DynProofExpr { DynProofExpr::try_new_add(left, right).unwrap() } +/// # Panics +/// Panics if: +/// - `DynProofExpr::try_new_subtract()` returns an error. pub fn subtract(left: DynProofExpr, right: DynProofExpr) -> DynProofExpr { DynProofExpr::try_new_subtract(left, right).unwrap() } +/// # Panics +/// Panics if: +/// - `DynProofExpr::try_new_multiply()` returns an error.
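+///
+/// Illustrative test usage (assumes `t` and `accessor` are set up as for the other helpers
+/// in this module):
+/// ```ignore
+/// let product = multiply(column(t, "a", &accessor), column(t, "b", &accessor));
+/// ```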
pub fn multiply(left: DynProofExpr, right: DynProofExpr) -> DynProofExpr { DynProofExpr::try_new_multiply(left, right).unwrap() } @@ -91,6 +125,9 @@ pub fn const_scalar(val: T) -> DynProofExpr +/// # Panics +/// Panics if: +/// - `Precision::new(precision)` fails, i.e. the provided precision is invalid. pub fn decimal( precision: u8, scale: i8, @@ -107,6 +144,9 @@ pub fn tab(tab: TableRef) -> TableExpr { TableExpr { table_ref: tab } } +/// # Panics +/// Panics if: +/// - `alias.parse()` fails to parse the provided alias string. pub fn aliased_plan(expr: DynProofExpr, alias: &str) -> AliasedDynProofExpr { AliasedDynProofExpr { expr, @@ -114,6 +154,10 @@ pub fn aliased_plan(expr: DynProofExpr, alias: &str) -> Aliase } } +/// # Panics +/// Panics if: +/// - `old_name.parse()` or `new_name.parse()` fails to parse the provided column names. +/// - `col_ref()` fails to find the referenced column, leading to a panic in the column accessor. pub fn aliased_col_expr_plan( tab: TableRef, old_name: &str, @@ -126,6 +170,10 @@ pub fn aliased_col_expr_plan( } } +/// # Panics +/// Panics if: +/// - `name.parse()` fails to parse the provided column name. +/// - `col_ref()` fails to find the referenced column, leading to a panic in the column accessor. pub fn col_expr_plan( tab: TableRef, name: &str, @@ -178,6 +226,9 @@ pub fn cols_expr( .collect() } +/// # Panics +/// Panics if: +/// - `alias.parse()` fails to parse the provided alias string. pub fn sum_expr(expr: DynProofExpr, alias: &str) -> AliasedDynProofExpr { AliasedDynProofExpr { expr: DynProofExpr::new_aggregate(AggregationOperator::Sum, expr), diff --git a/crates/proof-of-sql/src/sql/proof_plans/group_by_exec.rs b/crates/proof-of-sql/src/sql/proof_plans/group_by_exec.rs index f05f2dde7..07d187c31 100644 --- a/crates/proof-of-sql/src/sql/proof_plans/group_by_exec.rs +++ b/crates/proof-of-sql/src/sql/proof_plans/group_by_exec.rs @@ -363,6 +363,10 @@ fn verify_group_by( Ok(()) } +#[allow( + clippy::missing_panics_doc, + reason = "alpha is guaranteed to not be zero in this context" +)] pub fn prove_group_by<'a, S: Scalar>( builder: &mut ProofBuilder<'a, S>, alloc: &'a Bump, diff --git a/crates/proof-of-sql/src/sql/proof_plans/test_utility.rs b/crates/proof-of-sql/src/sql/proof_plans/test_utility.rs index c33cf9b22..6f348679b 100644 --- a/crates/proof-of-sql/src/sql/proof_plans/test_utility.rs +++ b/crates/proof-of-sql/src/sql/proof_plans/test_utility.rs @@ -19,6 +19,9 @@ pub fn filter( DynProofPlan::Filter(FilterExec::new(results, table, where_clause)) } +/// # Panics +/// +/// Will panic if `count_alias` cannot be parsed as a valid identifier.
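+///
+/// Illustrative: an alias such as `"c"` parses as a valid identifier, while one starting
+/// with a digit, e.g. `"1c"`, fails to parse and triggers the panic.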
pub fn group_by( group_by_exprs: Vec<ColumnExpr<C>>, sum_expr: Vec<AliasedDynProofExpr<C>>, diff --git a/crates/proof-of-sql/tests/decimal_integration_tests.rs b/crates/proof-of-sql/tests/decimal_integration_tests.rs index 522ac6d93..20e8146c5 100644 --- a/crates/proof-of-sql/tests/decimal_integration_tests.rs +++ b/crates/proof-of-sql/tests/decimal_integration_tests.rs @@ -1,4 +1,5 @@ #![cfg(feature = "test")] +#![cfg_attr(test, allow(clippy::missing_panics_doc))] #[cfg(feature = "blitzar")] use blitzar::proof::InnerProductProof; #[cfg(feature = "blitzar")] diff --git a/crates/proof-of-sql/tests/integration_tests.rs b/crates/proof-of-sql/tests/integration_tests.rs index 1017e1211..b0c87b7a5 100644 --- a/crates/proof-of-sql/tests/integration_tests.rs +++ b/crates/proof-of-sql/tests/integration_tests.rs @@ -1,4 +1,5 @@ #![cfg(feature = "test")] +#![cfg_attr(test, allow(clippy::missing_panics_doc))] use ark_std::test_rng; use curve25519_dalek::RistrettoPoint; #[cfg(feature = "blitzar")] diff --git a/crates/proof-of-sql/tests/timestamp_integration_tests.rs b/crates/proof-of-sql/tests/timestamp_integration_tests.rs index 7aaa698ec..aeb4ea0b1 100644 --- a/crates/proof-of-sql/tests/timestamp_integration_tests.rs +++ b/crates/proof-of-sql/tests/timestamp_integration_tests.rs @@ -1,4 +1,5 @@ #![cfg(feature = "test")] +#![cfg_attr(test, allow(clippy::missing_panics_doc))] use ark_std::test_rng; #[cfg(feature = "blitzar")] use proof_of_sql::base::commitment::InnerProductProof;