Commit c749566

chore: major resolved clippy::needless_pass_by_value lint warnings (#…

iajoiner authored Oct 16, 2024
2 parents 187a304 + 2e82d36 commit c749566

Showing 29 changed files with 91 additions and 81 deletions.
3 changes: 3 additions & 0 deletions Cargo.toml
@@ -101,3 +101,6 @@ match_wildcard_for_single_variants = "deny"
match_bool = "deny"
manual_assert = "deny"
trivially_copy_pass_by_ref = "deny"
+unnecessary_wraps = "deny"
+should_panic_without_expect = "deny"
+needless_pass_by_value = "deny"
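Together, these three lines deny the lints that the rest of the commit cleans up after. As a rough guide, here is a minimal sketch (illustrative code, not from this repository) of what each newly denied lint flags:

```rust
// Hypothetical examples of the three newly denied lints; all names are illustrative.

// clippy::needless_pass_by_value: `v` is only read, so a borrow is enough.
fn sum_by_value(v: Vec<i64>) -> i64 {
    v.iter().sum() // taking `Vec<i64>` by value forces callers to move or clone
}
fn sum_by_ref(v: &[i64]) -> i64 {
    v.iter().sum() // same body, but callers keep ownership
}

// clippy::unnecessary_wraps: the Err arm is unreachable, so the wrapper is noise.
fn always_ok() -> Result<i64, String> {
    Ok(42)
}

// clippy::should_panic_without_expect: without `expected`, the test passes on
// *any* panic; the message pins it to the intended failure.
#[test]
#[should_panic(expected = "index out of bounds")]
fn out_of_bounds_panics() {
    let v = vec![1, 2, 3];
    let _ = v[10];
}
```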
4 changes: 2 additions & 2 deletions crates/proof-of-sql-parser/src/identifier.rs
@@ -262,13 +262,13 @@ mod tests {
}

#[test]
-#[should_panic]
+#[should_panic(expected = "Identifier too long: CapacityError: insufficient capacity")]
fn long_names_panic() {
Identifier::new("t".repeat(65));
}

#[test]
-#[should_panic]
+#[should_panic(expected = "Identifier too long: CapacityError: insufficient capacity")]
fn long_unicode_names_panic() {
Identifier::new("茶".repeat(22));
}
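The `expected` string is matched as a substring of the actual panic message, so these two tests now also fail if `Identifier::new` starts panicking for some other reason. A short sketch of that substring semantics, using a hypothetical fixed-capacity type in place of `Identifier`:

```rust
// `TinyName` is a hypothetical stand-in for a 64-byte fixed-capacity identifier.
struct TinyName;

impl TinyName {
    fn new(name: &str) -> Self {
        // Mirrors the message asserted by the tests above.
        assert!(
            name.len() <= 64,
            "Identifier too long: CapacityError: insufficient capacity"
        );
        TinyName
    }
}

#[test]
#[should_panic(expected = "CapacityError")] // any substring of the message matches
fn too_long_names_panic() {
    TinyName::new(&"t".repeat(65));
}
```

Assuming a 64-byte capacity (consistent with the 65-character ASCII case above), the Unicode test panics for the same reason: 22 copies of the 3-byte character "茶" occupy 66 bytes.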
1 change: 1 addition & 0 deletions crates/proof-of-sql-parser/src/intermediate_ast_tests.rs
@@ -779,6 +779,7 @@ fn we_can_parse_multiple_order_by() {
// TODO: we should be able to pass this test.
// But due to some lalrpop restriction, we aren't.
// This problem will be addressed in a future PR.
+#[allow(clippy::should_panic_without_expect)]
#[test]
#[should_panic]
fn we_cannot_parse_order_by_referencing_reserved_keywords_yet() {
@@ -146,7 +146,7 @@ mod tests {
use itertools::Itertools;

fn metadata_map_from_owned_table(
-table: OwnedTable<Curve25519Scalar>,
+table: &OwnedTable<Curve25519Scalar>,
) -> ColumnCommitmentMetadataMap {
let (identifiers, columns): (Vec<&Identifier>, Vec<CommittableColumn>) = table
.inner_table()
@@ -171,7 +171,7 @@
scalar("scalar_column", [1000, 2000, -1000, 0]),
]);

-let metadata_map = metadata_map_from_owned_table(table);
+let metadata_map = metadata_map_from_owned_table(&table);

assert_eq!(metadata_map.len(), 4);

@@ -214,23 +214,23 @@
varchar("varchar_column", ["Lorem", "ipsum"]),
scalar("scalar_column", [1000, 2000]),
]);
-let metadata_a = metadata_map_from_owned_table(table_a);
+let metadata_a = metadata_map_from_owned_table(&table_a);

let table_b = owned_table([
bigint("bigint_column", [-5, 0, 10]),
int128("int128_column", [300, 400, 500]),
varchar("varchar_column", ["dolor", "sit", "amet"]),
scalar("scalar_column", [-1000, 0, -2000]),
]);
-let metadata_b = metadata_map_from_owned_table(table_b);
+let metadata_b = metadata_map_from_owned_table(&table_b);

let table_c = owned_table([
bigint("bigint_column", [1, 5, -5, 0, 10]),
int128("int128_column", [100, 200, 300, 400, 500]),
varchar("varchar_column", ["Lorem", "ipsum", "dolor", "sit", "amet"]),
scalar("scalar_column", [1000, 2000, -1000, 0, -2000]),
]);
-let metadata_c = metadata_map_from_owned_table(table_c);
+let metadata_c = metadata_map_from_owned_table(&table_c);

assert_eq!(metadata_a.try_union(metadata_b).unwrap(), metadata_c);
}
@@ -242,15 +242,15 @@
varchar("varchar_column", ["Lorem", "ipsum"]),
scalar("scalar_column", [1000, 2000]),
]);
-let metadata_a = metadata_map_from_owned_table(table_a);
+let metadata_a = metadata_map_from_owned_table(&table_a);

let table_b = owned_table([
bigint("bigint_column", [1, 5, -5, 0, 10]),
int128("int128_column", [100, 200, 300, 400, 500]),
varchar("varchar_column", ["Lorem", "ipsum", "dolor", "sit", "amet"]),
scalar("scalar_column", [1000, 2000, -1000, 0, -2000]),
]);
-let metadata_b = metadata_map_from_owned_table(table_b);
+let metadata_b = metadata_map_from_owned_table(&table_b);

let b_difference_a = metadata_b.try_difference(metadata_a.clone()).unwrap();

@@ -297,13 +297,13 @@
varchar("varchar_column", ["Lorem", "ipsum", "dolor", "sit", "amet"]),
scalar("scalar_column", [1000, 2000, -1000, 0, -2000]),
]);
-let metadata_a = metadata_map_from_owned_table(table_a);
+let metadata_a = metadata_map_from_owned_table(&table_a);

let table_b = owned_table([
bigint("bigint_column", [1, 5, -5, 0, 10]),
varchar("varchar_column", ["Lorem", "ipsum", "dolor", "sit", "amet"]),
]);
-let metadata_b = metadata_map_from_owned_table(table_b);
+let metadata_b = metadata_map_from_owned_table(&table_b);

assert!(matches!(
metadata_a.clone().try_union(metadata_b.clone()),
@@ -337,25 +337,25 @@
let strings = ["Lorem", "ipsum", "dolor", "sit"];

let ab_ii_metadata =
-metadata_map_from_owned_table(owned_table([bigint(id_a, ints), bigint(id_b, ints)]));
+metadata_map_from_owned_table(&owned_table([bigint(id_a, ints), bigint(id_b, ints)]));

-let ab_iv_metadata = metadata_map_from_owned_table(owned_table([
+let ab_iv_metadata = metadata_map_from_owned_table(&owned_table([
bigint(id_a, ints),
varchar(id_b, strings),
]));

-let ab_vi_metadata = metadata_map_from_owned_table(owned_table([
+let ab_vi_metadata = metadata_map_from_owned_table(&owned_table([
varchar(id_a, strings),
bigint(id_b, ints),
]));

let ad_ii_metadata =
-metadata_map_from_owned_table(owned_table([bigint(id_a, ints), bigint(id_d, ints)]));
+metadata_map_from_owned_table(&owned_table([bigint(id_a, ints), bigint(id_d, ints)]));

let cb_ii_metadata =
-metadata_map_from_owned_table(owned_table([bigint(id_c, ints), bigint(id_b, ints)]));
+metadata_map_from_owned_table(&owned_table([bigint(id_c, ints), bigint(id_b, ints)]));

let cd_vv_metadata = metadata_map_from_owned_table(owned_table([
let cd_vv_metadata = metadata_map_from_owned_table(&owned_table([
varchar(id_c, strings),
varchar(id_d, strings),
]));
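Every call site in this file follows one mechanical recipe: the helper now borrows the table, so tests pass `&table` (or `&owned_table(...)` for a temporary) instead of moving ownership in. A reduced sketch of the pattern, with illustrative types:

```rust
// `Table` and `summarize` are illustrative stand-ins, not the crate's types.
#[derive(Debug, PartialEq)]
struct Table(Vec<i64>);

// Before: `fn summarize(table: Table)` consumed its argument, so a caller that
// needed the table afterwards had to clone it. Borrowing removes that cost.
fn summarize(table: &Table) -> i64 {
    table.0.iter().sum()
}

fn main() {
    let table = Table(vec![1, 2, 3]);
    assert_eq!(summarize(&table), 6);
    // Borrowing a temporary also works: the reference lives for the call.
    assert_eq!(summarize(&Table(vec![4, 5])), 9);
    // `table` is still owned here.
    assert_eq!(table, Table(vec![1, 2, 3]));
}
```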
@@ -15,30 +15,30 @@ use arrow::{
};

fn we_can_convert_between_owned_column_and_array_ref_impl(
-owned_column: OwnedColumn<Curve25519Scalar>,
+owned_column: &OwnedColumn<Curve25519Scalar>,
array_ref: ArrayRef,
) {
let ic_to_ar = ArrayRef::from(owned_column.clone());
let ar_to_ic = OwnedColumn::try_from(array_ref.clone()).unwrap();

assert!(ic_to_ar == array_ref);
-assert_eq!(owned_column, ar_to_ic);
+assert_eq!(*owned_column, ar_to_ic);
}
fn we_can_convert_between_boolean_owned_column_and_array_ref_impl(data: Vec<bool>) {
we_can_convert_between_owned_column_and_array_ref_impl(
-OwnedColumn::<Curve25519Scalar>::Boolean(data.clone()),
+&OwnedColumn::<Curve25519Scalar>::Boolean(data.clone()),
Arc::new(BooleanArray::from(data)),
);
}
fn we_can_convert_between_bigint_owned_column_and_array_ref_impl(data: Vec<i64>) {
we_can_convert_between_owned_column_and_array_ref_impl(
-OwnedColumn::<Curve25519Scalar>::BigInt(data.clone()),
+&OwnedColumn::<Curve25519Scalar>::BigInt(data.clone()),
Arc::new(Int64Array::from(data)),
);
}
fn we_can_convert_between_int128_owned_column_and_array_ref_impl(data: Vec<i128>) {
we_can_convert_between_owned_column_and_array_ref_impl(
-OwnedColumn::<Curve25519Scalar>::Int128(data.clone()),
+&OwnedColumn::<Curve25519Scalar>::Int128(data.clone()),
Arc::new(
Decimal128Array::from(data)
.with_precision_and_scale(38, 0)
@@ -48,7 +48,7 @@ fn we_can_convert_between_int128_owned_column_and_array_ref_impl(data: Vec<i128>
}
fn we_can_convert_between_varchar_owned_column_and_array_ref_impl(data: Vec<String>) {
we_can_convert_between_owned_column_and_array_ref_impl(
-OwnedColumn::<Curve25519Scalar>::VarChar(data.clone()),
+&OwnedColumn::<Curve25519Scalar>::VarChar(data.clone()),
Arc::new(StringArray::from(data)),
);
}
@@ -81,37 +81,37 @@ fn we_get_an_unsupported_type_error_when_trying_to_convert_from_a_float32_array_
}

fn we_can_convert_between_owned_table_and_record_batch_impl(
-owned_table: OwnedTable<Curve25519Scalar>,
-record_batch: RecordBatch,
+owned_table: &OwnedTable<Curve25519Scalar>,
+record_batch: &RecordBatch,
) {
let it_to_rb = RecordBatch::try_from(owned_table.clone()).unwrap();
let rb_to_it = OwnedTable::try_from(record_batch.clone()).unwrap();

-assert_eq!(it_to_rb, record_batch);
-assert_eq!(rb_to_it, owned_table);
+assert_eq!(it_to_rb, *record_batch);
+assert_eq!(rb_to_it, *owned_table);
}
#[test]
fn we_can_convert_between_owned_table_and_record_batch() {
we_can_convert_between_owned_table_and_record_batch_impl(
-OwnedTable::<Curve25519Scalar>::try_new(IndexMap::default()).unwrap(),
-RecordBatch::new_empty(Arc::new(Schema::empty())),
+&OwnedTable::<Curve25519Scalar>::try_new(IndexMap::default()).unwrap(),
+&RecordBatch::new_empty(Arc::new(Schema::empty())),
);
we_can_convert_between_owned_table_and_record_batch_impl(
-owned_table([
+&owned_table([
bigint("int64", [0; 0]),
int128("int128", [0; 0]),
varchar("string", ["0"; 0]),
boolean("boolean", [true; 0]),
]),
-record_batch!(
+&record_batch!(
"int64" => [0_i64; 0],
"int128" => [0_i128; 0],
"string" => ["0"; 0],
"boolean" => [true; 0],
),
);
we_can_convert_between_owned_table_and_record_batch_impl(
-owned_table([
+&owned_table([
bigint("int64", [0, 1, 2, 3, 4, 5, 6, i64::MIN, i64::MAX]),
int128("int128", [0, 1, 2, 3, 4, 5, 6, i128::MIN, i128::MAX]),
varchar("string", ["0", "1", "2", "3", "4", "5", "6", "7", "8"]),
@@ -120,7 +120,7 @@ fn we_can_convert_between_owned_table_and_record_batch() {
[true, false, true, false, true, false, true, false, true],
),
]),
-record_batch!(
+&record_batch!(
"int64" => [0_i64, 1, 2, 3, 4, 5, 6, i64::MIN, i64::MAX],
"int128" => [0_i128, 1, 2, 3, 4, 5, 6, i128::MIN, i128::MAX],
"string" => ["0", "1", "2", "3", "4", "5", "6", "7", "8"],
@@ -142,7 +142,7 @@ fn we_cannot_convert_a_record_batch_if_it_has_repeated_column_names() {
}

#[test]
-#[should_panic]
+#[should_panic(expected = "not implemented: Cannot convert Scalar type to arrow type")]
fn we_panic_when_converting_an_owned_table_with_a_scalar_column() {
let owned_table = owned_table::<Curve25519Scalar>([scalar("a", [0; 0])]);
let _ = RecordBatch::try_from(owned_table);
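One wrinkle the borrowed parameters introduce: `assert_eq!` needs both sides at compatible types, so comparisons against them gain a dereference (`*owned_column`, `*record_batch`, `*owned_table`). A sketch of why, under illustrative types:

```rust
// Illustrative roundtrip check; `Vec<i32>` stands in for the converted types.
fn roundtrip_check(original: &Vec<i32>) {
    let converted: Vec<i32> = original.clone(); // stand-in for the conversion pair
    // `assert_eq!(original, converted)` would not compile: there is no
    // PartialEq between `&Vec<i32>` and `Vec<i32>`, so deref the reference.
    assert_eq!(*original, converted);
}

fn main() {
    roundtrip_check(&vec![1, 2, 3]);
}
```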
2 changes: 1 addition & 1 deletion crates/proof-of-sql/src/base/slice_ops/mul_add_assign.rs
@@ -12,7 +12,7 @@
T: Send + Sync + Mul<Output = T> + AddAssign + Copy,
S: Into<T> + Sync + Copy,
{
-assert!(result.len() >= to_mul_add.len());
+assert!(result.len() >= to_mul_add.len(), "The length of result must be greater than or equal to the length of the vector of values to be multiplied and added");
if_rayon!(
result.par_iter_mut().with_min_len(super::MIN_RAYON_LEN),
result.iter_mut()
@@ -22,7 +22,9 @@ fn test_mul_add_assign_uneven() {

/// test that [`mul_add_assign`] with uneven lengths panics when len(a) < len(b)
#[test]
-#[should_panic]
+#[should_panic(
+expected = "The length of result must be greater than or equal to the length of the vector of values to be multiplied and added"
+)]
fn test_mul_add_assign_uneven_panic() {
let mut a = vec![1, 2, 3, 4];
let b = vec![2, 3, 4, 5, 6];
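The message added to the `assert!` and the `expected` string in the test have to stay in sync, since `#[should_panic(expected = ...)]` matches against the panic payload. A sketch of the pair (`mul_add_assign_sketch` is illustrative, not the crate's implementation):

```rust
fn mul_add_assign_sketch(result: &mut [i64], multiplier: i64, to_mul_add: &[i64]) {
    // Without this check, `zip` below would silently stop at the shorter slice.
    assert!(
        result.len() >= to_mul_add.len(),
        "The length of result must be greater than or equal to the length of the vector of values to be multiplied and added"
    );
    for (r, x) in result.iter_mut().zip(to_mul_add) {
        *r += multiplier * *x;
    }
}

#[test]
#[should_panic(
    expected = "The length of result must be greater than or equal to the length"
)]
fn short_result_panics() {
    let mut a = vec![1, 2, 3, 4];
    // len(a) = 4 < 5, so the assert fires with the message above.
    mul_add_assign_sketch(&mut a, 10, &[2, 3, 4, 5, 6]);
}
```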
@@ -18,7 +18,7 @@ pub fn dory_inner_product_prove(
for _ in 0..nu {
dory_reduce_prove(messages, transcript, &mut state, setup);
}
-scalar_product_prove(messages, transcript, state);
+scalar_product_prove(messages, transcript, &state);
}

/// This is the verifier side of the Dory-Innerproduct algorithm in section 3.3 of <https://eprint.iacr.org/2020/1274.pdf>.
@@ -25,7 +25,7 @@ pub fn extended_dory_inner_product_prove(
extended_dory_reduce_prove(messages, transcript, &mut state, setup);
}
let base_state = fold_scalars_0_prove(messages, transcript, state, setup);
-scalar_product_prove(messages, transcript, base_state);
+scalar_product_prove(messages, transcript, &base_state);
}

/// This is the verifier side of the extended Dory-Innerproduct algorithm in section 4.3 of https://eprint.iacr.org/2020/1274.pdf.
@@ -7,7 +7,7 @@ use crate::base::proof::Transcript;
pub fn scalar_product_prove(
messages: &mut DoryMessages,
transcript: &mut impl Transcript,
-state: ProverState,
+state: &ProverState,
) {
// See section 3.1 of https://eprint.iacr.org/2020/1274.pdf.
//
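The three hunks above share a split worth noting: `fold_scalars_0_prove` still consumes the state it transforms (pass by value), while the terminal `scalar_product_prove` only reads it, so it now borrows. A sketch of that convention with illustrative names:

```rust
// Illustrative stand-ins, not the crate's Dory types.
struct ExtendedState(Vec<u64>);
struct BaseState(u64);

// Consumes its input to produce a new state: by-value is the honest signature.
fn fold_step(state: ExtendedState) -> BaseState {
    BaseState(state.0.into_iter().sum())
}

// Only reads the final state: clippy::needless_pass_by_value asks for a borrow.
fn final_step(state: &BaseState) -> u64 {
    state.0
}

fn main() {
    let base = fold_step(ExtendedState(vec![1, 2, 3]));
    assert_eq!(final_step(&base), 6);
}
```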
4 changes: 2 additions & 2 deletions crates/proof-of-sql/src/sql/parse/enriched_expr.rs
@@ -28,12 +28,12 @@ impl<C: Commitment> EnrichedExpr<C> {
/// and the `residue_expression` will contain the remaining expression.
pub fn new(
expression: AliasedResultExpr,
-column_mapping: IndexMap<Identifier, ColumnRef>,
+column_mapping: &IndexMap<Identifier, ColumnRef>,
) -> Self {
// TODO: Using new_agg (ironically) disables aggregations in `QueryExpr` for now.
// Re-enable aggregations when we add `GroupByExec` generalizations.
let res_dyn_proof_expr =
-DynProofExprBuilder::new_agg(&column_mapping).build(&expression.expr);
+DynProofExprBuilder::new_agg(column_mapping).build(&expression.expr);
match res_dyn_proof_expr {
Ok(dyn_proof_expr) => {
let alias = expression.alias;
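Note the knock-on change inside `EnrichedExpr::new`: once `column_mapping` arrives as `&IndexMap`, the builder call forwards that reference directly instead of borrowing a local. A sketch with illustrative names (`Mapping`, `build_with`):

```rust
use std::collections::HashMap;

// `HashMap` stands in for the crate's IndexMap here.
type Mapping = HashMap<String, usize>;

fn build_with(mapping: &Mapping) -> usize {
    mapping.len()
}

fn new_enriched(mapping: &Mapping) -> usize {
    // Before: a by-value `mapping` parameter was borrowed here as `&mapping`.
    // Now the incoming reference is passed straight through.
    build_with(mapping)
}

fn main() {
    let mut m = Mapping::new();
    m.insert("a".to_string(), 0);
    assert_eq!(new_enriched(&m), 1);
}
```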
2 changes: 1 addition & 1 deletion crates/proof-of-sql/src/sql/parse/query_context.rs
@@ -117,7 +117,7 @@ impl QueryContext {
}
}

-#[allow(clippy::missing_panics_doc)]
+#[allow(clippy::missing_panics_doc, clippy::unnecessary_wraps)]
pub fn push_aliased_result_expr(&mut self, expr: AliasedResultExpr) -> ConversionResult<()> {
assert!(&self.has_visited_group_by, "Group by must be visited first");
self.res_aliased_exprs.push(expr);
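`push_aliased_result_expr` only ever returns `Ok` today, which is exactly what `clippy::unnecessary_wraps` flags; the added allow presumably keeps the `Result` signature so the conversion APIs stay uniformly fallible. A sketch of that trade-off with illustrative names:

```rust
#[allow(dead_code)]
#[derive(Debug)]
struct ConversionError;

// The Err arm is currently unreachable; the allow records that the Result is
// kept deliberately so callers treat this like the genuinely fallible methods.
#[allow(clippy::unnecessary_wraps)]
fn push_expr(exprs: &mut Vec<String>, expr: String) -> Result<(), ConversionError> {
    exprs.push(expr);
    Ok(())
}

fn main() {
    let mut exprs = Vec::new();
    push_expr(&mut exprs, "a + b AS c".to_string()).unwrap();
    assert_eq!(exprs.len(), 1);
}
```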
3 changes: 2 additions & 1 deletion crates/proof-of-sql/src/sql/parse/query_context_builder.rs
@@ -32,7 +32,7 @@ impl<'a> QueryContextBuilder<'a> {
#[allow(clippy::vec_box, clippy::missing_panics_doc)]
pub fn visit_table_expr(
mut self,
-table_expr: Vec<Box<TableExpression>>,
+table_expr: &[Box<TableExpression>],
default_schema: Identifier,
) -> Self {
assert_eq!(table_expr.len(), 1);
@@ -95,6 +95,7 @@ impl<'a> QueryContextBuilder<'a> {
Ok(self)
}

+#[allow(clippy::unnecessary_wraps)]
pub fn build(self) -> ConversionResult<QueryContext> {
Ok(self.context)
}
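This file picks up two of the commit's patterns at once: `visit_table_expr` now borrows a slice instead of taking the `Vec` by value, and `build` carries the same `unnecessary_wraps` allow as above. A sketch of the slice change with an illustrative type:

```rust
// `TableExprSketch` is an illustrative stand-in for TableExpression.
struct TableExprSketch {
    name: String,
}

// Accepting `&[Box<T>]` instead of `Vec<Box<T>>` lets callers keep their Vec
// (the builder only inspects it) and also accepts arrays and other slices.
fn visit_table_expr(table_expr: &[Box<TableExprSketch>]) -> &str {
    assert_eq!(table_expr.len(), 1, "exactly one table is supported");
    &table_expr[0].name
}

fn main() {
    let from = vec![Box::new(TableExprSketch { name: "sxt.t".to_string() })];
    assert_eq!(visit_table_expr(&from), "sxt.t");
    assert_eq!(from.len(), 1); // `from` is still owned by the caller
}
```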
4 changes: 2 additions & 2 deletions crates/proof-of-sql/src/sql/parse/query_expr.rs
@@ -56,7 +56,7 @@ impl<C: Commitment> QueryExpr<C> {
where_expr,
group_by,
} => QueryContextBuilder::new(schema_accessor)
-.visit_table_expr(from, default_schema)
+.visit_table_expr(&from, default_schema)
.visit_group_by_exprs(group_by)?
.visit_result_exprs(result_exprs)?
.visit_where_expr(where_expr)?
@@ -130,7 +130,7 @@ impl<C: Commitment> QueryExpr<C> {
let column_mapping = context.get_column_mapping();
let enriched_exprs = result_aliased_exprs
.iter()
-.map(|aliased_expr| EnrichedExpr::new(aliased_expr.clone(), column_mapping.clone()))
+.map(|aliased_expr| EnrichedExpr::new(aliased_expr.clone(), &column_mapping))
.collect::<Vec<_>>();
let select_exprs = enriched_exprs
.iter()
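The closure change removes a per-iteration cost: `column_mapping` was previously cloned once per aliased expression, while a borrow shares one map across the whole pass. A sketch with illustrative names:

```rust
use std::collections::HashMap;

// Illustrative stand-in for EnrichedExpr::new taking the mapping by reference.
fn enrich(alias: &str, mapping: &HashMap<String, usize>) -> (String, usize) {
    (alias.to_string(), mapping.len())
}

fn main() {
    let column_mapping: HashMap<String, usize> =
        HashMap::from([("a".to_string(), 0), ("b".to_string(), 1)]);
    let aliases = ["x", "y", "z"];
    let enriched: Vec<_> = aliases
        .iter()
        .map(|alias| enrich(alias, &column_mapping)) // no clone per iteration
        .collect();
    assert_eq!(enriched.len(), 3);
}
```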