Skip to content

Commit

Permalink
chore: resolve `clippy::similar_names`, `clippy::many_single_char_names` & `clippy::module_name_repetitions` lints in `proof-of-sql` (#218)
Browse files Browse the repository at this point in the history

# Rationale for this change

We have `cargo clippy` running in our CI in order to enforce code quality.
In order to raise our standards, we should enable the
`clippy::pedantic` lint group.

# What changes are included in this PR?

This PR fixes `clippy::similar_names`, `clippy::many_single_char_names`
& `clippy::module_name_repetitions` warnings for proof-of-sql lib.

Currently I have completely allowed the `clippy::module_name_repetitions`
lint, as it appears to be too strict.

# Are these changes tested?

Yes
  • Loading branch information
JayWhite2357 authored Oct 5, 2024
2 parents 540bb82 + 8022530 commit 822887f
Show file tree
Hide file tree
Showing 8 changed files with 62 additions and 50 deletions.
2 changes: 1 addition & 1 deletion crates/proof-of-sql/src/base/database/column.rs
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ impl<'a, S: Scalar> Column<'a, S> {
match self {
Self::Boolean(col) => slice_cast_with(col, |b| S::from(b) * scale_factor),
Self::Decimal75(_, _, col) => slice_cast_with(col, |s| *s * scale_factor),
Self::VarChar((_, scals)) => slice_cast_with(scals, |s| *s * scale_factor),
Self::VarChar((_, values)) => slice_cast_with(values, |s| *s * scale_factor),
Self::SmallInt(col) => slice_cast_with(col, |i| S::from(i) * scale_factor),
Self::Int(col) => slice_cast_with(col, |i| S::from(i) * scale_factor),
Self::BigInt(col) => slice_cast_with(col, |i| S::from(i) * scale_factor),
Expand Down
4 changes: 2 additions & 2 deletions crates/proof-of-sql/src/base/database/column_operation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -866,7 +866,7 @@ where
.expect("numeric columns have scale");
let applied_scale = rhs_scale - lhs_scale + new_scale;
let applied_scale_factor = BigInt::from(10).pow(applied_scale.unsigned_abs() as u32);
let res: Vec<S> = lhs
let result: Vec<S> = lhs
.iter()
.zip(rhs)
.map(|(l, r)| -> ColumnOperationResult<S> {
Expand All @@ -886,7 +886,7 @@ where
Ok((
Precision::new(new_precision_value).expect("Precision value is valid"),
new_scale,
res,
result,
))
}

Expand Down
2 changes: 1 addition & 1 deletion crates/proof-of-sql/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(clippy::missing_panics_doc)] // Fixed in Issue #163
#![allow(clippy::missing_panics_doc, clippy::module_name_repetitions)]
extern crate alloc;

pub mod base;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -223,7 +223,7 @@ impl<S: Scalar> PostprocessingStep<S> for GroupByPostprocessing {
.collect::<PostprocessingResult<Vec<_>>>()?;
// TODO: Allow a filter
let selection_in = vec![true; owned_table.num_rows()];
let (sum_ids, sum_ins): (Vec<_>, Vec<_>) = evaluated_columns
let (sum_identifiers, sum_columns): (Vec<_>, Vec<_>) = evaluated_columns
.get(&AggregationOperator::Sum)
.map(|tuple| {
tuple
Expand All @@ -232,7 +232,7 @@ impl<S: Scalar> PostprocessingStep<S> for GroupByPostprocessing {
.unzip()
})
.unwrap_or((vec![], vec![]));
let (max_ids, max_ins): (Vec<_>, Vec<_>) = evaluated_columns
let (max_identifiers, max_columns): (Vec<_>, Vec<_>) = evaluated_columns
.get(&AggregationOperator::Max)
.map(|tuple| {
tuple
Expand All @@ -241,7 +241,7 @@ impl<S: Scalar> PostprocessingStep<S> for GroupByPostprocessing {
.unzip()
})
.unwrap_or((vec![], vec![]));
let (min_ids, min_ins): (Vec<_>, Vec<_>) = evaluated_columns
let (min_identifiers, min_columns): (Vec<_>, Vec<_>) = evaluated_columns
.get(&AggregationOperator::Min)
.map(|tuple| {
tuple
Expand All @@ -253,9 +253,9 @@ impl<S: Scalar> PostprocessingStep<S> for GroupByPostprocessing {
let aggregation_results = aggregate_columns(
&alloc,
&group_by_ins,
&sum_ins,
&max_ins,
&min_ins,
&sum_columns,
&max_columns,
&min_columns,
&selection_in,
)?;
// Finally do another round of evaluation to get the final result
Expand All @@ -265,27 +265,39 @@ impl<S: Scalar> PostprocessingStep<S> for GroupByPostprocessing {
.iter()
.zip(self.group_by_identifiers.iter())
.map(|(column, id)| Ok((*id, OwnedColumn::from(column))));
let sum_outs =
izip!(aggregation_results.sum_columns, sum_ids, sum_ins,).map(|(c_out, id, c_in)| {
Ok((
id,
OwnedColumn::try_from_scalars(c_out, c_in.column_type())?,
))
});
let max_outs =
izip!(aggregation_results.max_columns, max_ids, max_ins,).map(|(c_out, id, c_in)| {
Ok((
id,
OwnedColumn::try_from_option_scalars(c_out, c_in.column_type())?,
))
});
let min_outs =
izip!(aggregation_results.min_columns, min_ids, min_ins,).map(|(c_out, id, c_in)| {
Ok((
id,
OwnedColumn::try_from_option_scalars(c_out, c_in.column_type())?,
))
});
let sum_outs = izip!(
aggregation_results.sum_columns,
sum_identifiers,
sum_columns,
)
.map(|(c_out, id, c_in)| {
Ok((
id,
OwnedColumn::try_from_scalars(c_out, c_in.column_type())?,
))
});
let max_outs = izip!(
aggregation_results.max_columns,
max_identifiers,
max_columns,
)
.map(|(c_out, id, c_in)| {
Ok((
id,
OwnedColumn::try_from_option_scalars(c_out, c_in.column_type())?,
))
});
let min_outs = izip!(
aggregation_results.min_columns,
min_identifiers,
min_columns,
)
.map(|(c_out, id, c_in)| {
Ok((
id,
OwnedColumn::try_from_option_scalars(c_out, c_in.column_type())?,
))
});
//TODO: When we have NULLs we need to differentiate between count(1) and count(expression)
let count_column = OwnedColumn::BigInt(aggregation_results.count_column.to_vec());
let count_outs = evaluated_columns
Expand All @@ -307,15 +319,15 @@ impl<S: Scalar> PostprocessingStep<S> for GroupByPostprocessing {
} else {
new_owned_table
};
let res = self
let result = self
.remainder_exprs
.iter()
.map(|aliased_expr| -> PostprocessingResult<_> {
let column = target_table.evaluate(&aliased_expr.expr)?;
Ok((aliased_expr.alias, column))
})
.process_results(|iter| OwnedTable::try_from_iter(iter))??;
Ok(res)
Ok(result)
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,18 +74,18 @@ impl<S: Scalar> CompositePolynomialBuilder<S> {
&mut self,
terms: &[Box<dyn MultilinearExtension<S> + '_>],
) -> Vec<Rc<Vec<S>>> {
let mut terms_p = Vec::with_capacity(terms.len());
let mut deduplicated_terms = Vec::with_capacity(terms.len());
for term in terms {
let id = term.id();
if let Some(term_p) = self.mles.get(&id) {
terms_p.push(term_p.clone());
if let Some(cached_term) = self.mles.get(&id) {
deduplicated_terms.push(cached_term.clone());
} else {
let term_p = term.to_sumcheck_term(self.num_sumcheck_variables);
self.mles.insert(id, term_p.clone());
terms_p.push(term_p);
let new_term = term.to_sumcheck_term(self.num_sumcheck_variables);
self.mles.insert(id, new_term.clone());
deduplicated_terms.push(new_term);
}
}
terms_p
deduplicated_terms
}

/// Create a composite polynomial that is the sum of all of the
Expand Down
6 changes: 3 additions & 3 deletions crates/proof-of-sql/src/sql/proof_exprs/comparison_util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,14 @@ fn unchecked_subtract_impl<'a, S: Scalar>(
rhs: &[S],
table_length: usize,
) -> ConversionResult<&'a [S]> {
let res = alloc.alloc_slice_fill_default(table_length);
if_rayon!(res.par_iter_mut(), res.iter_mut())
let result = alloc.alloc_slice_fill_default(table_length);
if_rayon!(result.par_iter_mut(), result.iter_mut())
.zip(lhs)
.zip(rhs)
.for_each(|((a, l), r)| {
*a = *l - *r;
});
Ok(res)
Ok(result)
}

/// Scale LHS and RHS to the same scale if at least one of them is decimal
Expand Down
12 changes: 6 additions & 6 deletions crates/proof-of-sql/src/sql/proof_exprs/numerical_util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,14 @@ pub(crate) fn add_subtract_columns<'a, S: Scalar>(
let max_scale = lhs_scale.max(rhs_scale);
let lhs_scalar = lhs.to_scalar_with_scaling(max_scale - lhs_scale);
let rhs_scalar = rhs.to_scalar_with_scaling(max_scale - rhs_scale);
let res = alloc.alloc_slice_fill_with(lhs_len, |i| {
let result = alloc.alloc_slice_fill_with(lhs_len, |i| {
if is_subtract {
lhs_scalar[i] - rhs_scalar[i]
} else {
lhs_scalar[i] + rhs_scalar[i]
}
});
res
result
}

/// Multiply two columns together.
Expand Down Expand Up @@ -55,13 +55,13 @@ pub(crate) fn scale_and_add_subtract_eval<S: Scalar>(
is_subtract: bool,
) -> S {
let max_scale = lhs_scale.max(rhs_scale);
let scaled_lhs_eval = scale_scalar(lhs_eval, max_scale - lhs_scale)
let left_scaled_eval = scale_scalar(lhs_eval, max_scale - lhs_scale)
.expect("scaling factor should not be negative");
let scaled_rhs_eval = scale_scalar(rhs_eval, max_scale - rhs_scale)
let right_scaled_eval = scale_scalar(rhs_eval, max_scale - rhs_scale)
.expect("scaling factor should not be negative");
if is_subtract {
scaled_lhs_eval - scaled_rhs_eval
left_scaled_eval - right_scaled_eval
} else {
scaled_lhs_eval + scaled_rhs_eval
left_scaled_eval + right_scaled_eval
}
}
2 changes: 1 addition & 1 deletion crates/proof-of-sql/src/sql/proof_plans/filter_exec.rs
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ fn verify_filter<C: Commitment>(
Ok(c_evals)
}

#[allow(clippy::too_many_arguments)]
#[allow(clippy::too_many_arguments, clippy::many_single_char_names)]
pub(super) fn prove_filter<'a, S: Scalar + 'a>(
builder: &mut ProofBuilder<'a, S>,
alloc: &'a Bump,
Expand Down

0 comments on commit 822887f

Please sign in to comment.