Skip to content

Commit

Permalink
chore: resolved clippy::cast_lossless lint warnings (#227)
Browse files Browse the repository at this point in the history
# Rationale for this change

We run cargo clippy in our CI to enforce code quality. To raise our
standards further, we should enable the clippy::pedantic lint group.

# What changes are included in this PR?

Resolved `clippy::cast_lossless` lint warnings

# Are these changes tested?

Yes.
  • Loading branch information
mehulmathur16 authored Oct 7, 2024
1 parent 48b2f0b commit 6237221
Show file tree
Hide file tree
Showing 20 changed files with 78 additions and 68 deletions.
3 changes: 2 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -76,4 +76,5 @@ semicolon_if_nothing_returned = "deny"
unnested_or_patterns = "deny"
unreadable_literal = "deny"
must_use_candidate = "deny"
range_plus_one = "deny"
range_plus_one = "deny"
cast_lossless = "deny"
2 changes: 1 addition & 1 deletion crates/proof-of-sql/benches/scaffold/random_util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ pub fn generate_random_columns<'a, S: Scalar>(
}
(ColumnType::Int128, Some(b)) => {
Column::Int128(alloc.alloc_slice_fill_with(num_rows, |_| {
rng.gen_range((-b(num_rows) as i128)..=(b(num_rows) as i128))
rng.gen_range((i128::from(-b(num_rows)))..=(i128::from(b(num_rows))))
}))
}
(ColumnType::VarChar, _) => {
Expand Down
24 changes: 13 additions & 11 deletions crates/proof-of-sql/src/base/database/column_operation.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,15 +45,15 @@ pub fn try_add_subtract_column_types(
Ok(ColumnType::Scalar)
} else {
let left_precision_value =
lhs.precision_value().expect("Numeric types have precision") as i16;
i16::from(lhs.precision_value().expect("Numeric types have precision"));
let right_precision_value =
rhs.precision_value().expect("Numeric types have precision") as i16;
i16::from(rhs.precision_value().expect("Numeric types have precision"));
let left_scale = lhs.scale().expect("Numeric types have scale");
let right_scale = rhs.scale().expect("Numeric types have scale");
let scale = left_scale.max(right_scale);
let precision_value: i16 = scale as i16
+ (left_precision_value - left_scale as i16)
.max(right_precision_value - right_scale as i16)
let precision_value: i16 = i16::from(scale)
+ (left_precision_value - i16::from(left_scale))
.max(right_precision_value - i16::from(right_scale))
+ 1_i16;
let precision = u8::try_from(precision_value)
.map_err(|_| ColumnOperationError::DecimalConversionError {
Expand Down Expand Up @@ -115,7 +115,7 @@ pub fn try_multiply_column_types(
let scale = left_scale.checked_add(right_scale).ok_or(
ColumnOperationError::DecimalConversionError {
source: DecimalError::InvalidScale {
scale: left_scale as i16 + right_scale as i16,
scale: i16::from(left_scale) + i16::from(right_scale),
},
},
)?;
Expand Down Expand Up @@ -150,10 +150,12 @@ pub fn try_divide_column_types(
// We can unwrap here because we know that both types are integers
return Ok(lhs.max_integer_type(&rhs).unwrap());
}
let left_precision_value = lhs.precision_value().expect("Numeric types have precision") as i16;
let right_precision_value = rhs.precision_value().expect("Numeric types have precision") as i16;
let left_scale = lhs.scale().expect("Numeric types have scale") as i16;
let right_scale = rhs.scale().expect("Numeric types have scale") as i16;
let left_precision_value =
i16::from(lhs.precision_value().expect("Numeric types have precision"));
let right_precision_value =
i16::from(rhs.precision_value().expect("Numeric types have precision"));
let left_scale = i16::from(lhs.scale().expect("Numeric types have scale"));
let right_scale = i16::from(rhs.scale().expect("Numeric types have scale"));
let raw_scale = (left_scale + right_precision_value + 1_i16).max(6_i16);
let precision_value: i16 = left_precision_value - left_scale + right_scale + raw_scale;
let scale =
Expand Down Expand Up @@ -899,7 +901,7 @@ where
.scale()
.expect("numeric columns have scale");
let applied_scale = rhs_scale - lhs_scale + new_scale;
let applied_scale_factor = BigInt::from(10).pow(applied_scale.unsigned_abs() as u32);
let applied_scale_factor = BigInt::from(10).pow(u32::from(applied_scale.unsigned_abs()));
let result: Vec<S> = lhs
.iter()
.zip(rhs)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ pub fn convert_scalar_to_i256<S: Scalar>(val: &S) -> i256 {
let limbs: [u64; 4] = abs_scalar.into();

let low = (limbs[0] as u128) | ((limbs[1] as u128) << 64);
let high = (limbs[2] as i128) | ((limbs[3] as i128) << 64);
let high = i128::from(limbs[2]) | (i128::from(limbs[3]) << 64);

let abs_i256 = i256::from_parts(low, high);
if is_negative {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ pub fn make_random_test_accessor_data(
ColumnType::Int128 => {
column_fields.push(Field::new(*col_name, DataType::Decimal128(38, 0), false));

let values: Vec<i128> = values.iter().map(|x| *x as i128).collect();
let values: Vec<i128> = values.iter().map(|x| i128::from(*x)).collect();
columns.push(Arc::new(
Decimal128Array::from(values.clone())
.with_precision_and_scale(38, 0)
Expand Down
12 changes: 9 additions & 3 deletions crates/proof-of-sql/src/base/encode/varint_trait.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,10 +70,12 @@ fn zigzag_decode(from: u64) -> i64 {
macro_rules! impl_varint {
($t:ty, unsigned) => {
impl VarInt for $t {
#[allow(clippy::cast_lossless)]
fn required_space(self) -> usize {
(self as u64).required_space()
}

#[allow(clippy::cast_lossless)]
fn decode_var(src: &[u8]) -> Option<(Self, usize)> {
let (n, s) = u64::decode_var(src)?;
// This check is required to ensure that we actually return `None` when `src` has a value that would overflow `Self`.
Expand All @@ -84,17 +86,20 @@ macro_rules! impl_varint {
}
}

#[allow(clippy::cast_lossless)]
fn encode_var(self, dst: &mut [u8]) -> usize {
(self as u64).encode_var(dst)
}
}
};
($t:ty, signed) => {
impl VarInt for $t {
#[allow(clippy::cast_lossless)]
fn required_space(self) -> usize {
(self as i64).required_space()
}

#[allow(clippy::cast_lossless)]
fn decode_var(src: &[u8]) -> Option<(Self, usize)> {
let (n, s) = i64::decode_var(src)?;
// This check is required to ensure that we actually return `None` when `src` has a value that would overflow `Self`.
Expand All @@ -105,6 +110,7 @@ macro_rules! impl_varint {
}
}

#[allow(clippy::cast_lossless)]
fn encode_var(self, dst: &mut [u8]) -> usize {
(self as i64).encode_var(dst)
}
Expand All @@ -124,7 +130,7 @@ impl_varint!(i8, signed);

impl VarInt for bool {
fn required_space(self) -> usize {
(self as u64).required_space()
u64::from(self).required_space()
}

fn decode_var(src: &[u8]) -> Option<(Self, usize)> {
Expand All @@ -138,7 +144,7 @@ impl VarInt for bool {
}

fn encode_var(self, dst: &mut [u8]) -> usize {
(self as u64).encode_var(dst)
u64::from(self).encode_var(dst)
}
}

Expand All @@ -159,7 +165,7 @@ impl VarInt for u64 {
let mut success = false;
for b in src {
let msb_dropped = b & DROP_MSB;
result |= (msb_dropped as u64) << shift;
result |= u64::from(msb_dropped) << shift;
shift += 7;

if shift > (9 * 7) {
Expand Down
18 changes: 9 additions & 9 deletions crates/proof-of-sql/src/base/encode/varint_trait_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -294,10 +294,10 @@ fn we_can_encode_and_decode_i32_and_i64_the_same() {
test_encode_and_decode_types_align::<i32, i64>(
&rng.gen::<[_; 32]>(),
&[
i32::MAX as i64 + 1,
i32::MIN as i64 - 1,
i32::MAX as i64 * 1000,
i32::MIN as i64 * 1000,
i64::from(i32::MAX) + 1,
i64::from(i32::MIN) - 1,
i64::from(i32::MAX) * 1000,
i64::from(i32::MIN) * 1000,
],
100,
);
Expand All @@ -308,7 +308,7 @@ fn we_can_encode_and_decode_u32_and_u64_the_same() {
let mut rng = rand::thread_rng();
test_encode_and_decode_types_align::<u32, u64>(
&rng.gen::<[_; 32]>(),
&[u32::MAX as u64 + 1, u32::MAX as u64 * 1000],
&[u64::from(u32::MAX) + 1, u64::from(u32::MAX) * 1000],
100,
);
}
Expand Down Expand Up @@ -423,10 +423,10 @@ fn we_can_encode_and_decode_i64_and_i128_the_same() {
test_encode_and_decode_types_align::<i64, i128>(
&rng.gen::<[_; 32]>(),
&[
i64::MAX as i128 + 1,
i64::MIN as i128 - 1,
i64::MAX as i128 * 1000,
i64::MIN as i128 * 1000,
i128::from(i64::MAX) + 1,
i128::from(i64::MIN) - 1,
i128::from(i64::MAX) * 1000,
i128::from(i64::MIN) * 1000,
],
100,
);
Expand Down
4 changes: 2 additions & 2 deletions crates/proof-of-sql/src/base/encode/zigzag.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ impl<T: MontConfig<4>> ZigZag<U256> for MontScalar<T> {
let (low_val, carry_low) = y.low.overflowing_sub(1_u128);

y.low = low_val;
y.high -= carry_low as u128; // we should never expect overflow here
y.high -= u128::from(carry_low); // we should never expect overflow here

// effectively encoding a ZigZag y
y
Expand Down Expand Up @@ -93,7 +93,7 @@ impl<T: MontConfig<4>> ZigZag<MontScalar<T>> for U256 {
let (low_val, carry_low) = zig_val.low.overflowing_add(1_u128);

zig_val.low = low_val;
zig_val.high += carry_low as u128; // we should never expect overflow here
zig_val.high += u128::from(carry_low); // we should never expect overflow here

// even though the encoding represented a -y,
// zig_val actually represents a `y` (we simply divided self by 2).
Expand Down
4 changes: 2 additions & 2 deletions crates/proof-of-sql/src/base/math/log.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ pub fn is_pow2<T: PrimInt + Unsigned>(x: T) -> bool {
}

pub fn log2_up<T: PrimInt + Unsigned>(x: T) -> usize {
let is_not_pow_2 = !is_pow2(x) as usize;
let is_not_pow_2 = usize::from(!is_pow2(x));
log2_down(x) + is_not_pow_2
}

Expand Down Expand Up @@ -50,7 +50,7 @@ pub fn log2_down_bytes<const N: usize>(data: &[u8; N]) -> usize {
/// If the data is 0, returns 0 instead of panicking.
#[cfg(test)]
pub fn log2_up_bytes<const N: usize>(data: &[u8; N]) -> usize {
let is_not_pow_2 = !is_pow2_bytes(data) as usize;
let is_not_pow_2 = usize::from(!is_pow2_bytes(data));
log2_down_bytes(data) + is_not_pow_2
}

Expand Down
10 changes: 5 additions & 5 deletions crates/proof-of-sql/src/base/scalar/mont_scalar.rs
Original file line number Diff line number Diff line change
Expand Up @@ -392,7 +392,7 @@ where
error: format!("{value} is too large to fit in an i8"),
});
}
let val: i128 = sign * abs[0] as i128;
let val: i128 = sign * i128::from(abs[0]);
match val {
0 => Ok(false),
1 => Ok(true),
Expand Down Expand Up @@ -420,7 +420,7 @@ where
error: format!("{value} is too large to fit in an i8"),
});
}
let val: i128 = sign * abs[0] as i128;
let val: i128 = sign * i128::from(abs[0]);
val.try_into().map_err(|_| ScalarConversionError::Overflow {
error: format!("{value} is too large to fit in an i8"),
})
Expand All @@ -444,7 +444,7 @@ where
error: format!("{value} is too large to fit in an i16"),
});
}
let val: i128 = sign * abs[0] as i128;
let val: i128 = sign * i128::from(abs[0]);
val.try_into().map_err(|_| ScalarConversionError::Overflow {
error: format!("{value} is too large to fit in an i16"),
})
Expand All @@ -468,7 +468,7 @@ where
error: format!("{value} is too large to fit in an i32"),
});
}
let val: i128 = sign * abs[0] as i128;
let val: i128 = sign * i128::from(abs[0]);
val.try_into().map_err(|_| ScalarConversionError::Overflow {
error: format!("{value} is too large to fit in an i32"),
})
Expand All @@ -492,7 +492,7 @@ where
error: format!("{value} is too large to fit in an i64"),
});
}
let val: i128 = sign * abs[0] as i128;
let val: i128 = sign * i128::from(abs[0]);
val.try_into().map_err(|_| ScalarConversionError::Overflow {
error: format!("{value} is too large to fit in an i64"),
})
Expand Down
16 changes: 8 additions & 8 deletions crates/proof-of-sql/src/base/scalar/mont_scalar_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -144,11 +144,11 @@ fn test_curve25519_scalar_to_i8() {
#[test]
fn test_curve25519_scalar_to_i8_overflow() {
matches!(
i8::try_from(Curve25519Scalar::from(i8::MAX as i128 + 1)),
i8::try_from(Curve25519Scalar::from(i128::from(i8::MAX) + 1)),
Err(ScalarConversionError::Overflow { .. })
);
matches!(
i8::try_from(Curve25519Scalar::from(i8::MIN as i128 - 1)),
i8::try_from(Curve25519Scalar::from(i128::from(i8::MIN) - 1)),
Err(ScalarConversionError::Overflow { .. })
);
}
Expand All @@ -171,11 +171,11 @@ fn test_curve25519_scalar_to_i16() {
#[test]
fn test_curve25519_scalar_to_i16_overflow() {
matches!(
i16::try_from(Curve25519Scalar::from(i16::MAX as i128 + 1)),
i16::try_from(Curve25519Scalar::from(i128::from(i16::MAX) + 1)),
Err(ScalarConversionError::Overflow { .. })
);
matches!(
i16::try_from(Curve25519Scalar::from(i16::MIN as i128 - 1)),
i16::try_from(Curve25519Scalar::from(i128::from(i16::MIN) - 1)),
Err(ScalarConversionError::Overflow { .. })
);
}
Expand All @@ -198,11 +198,11 @@ fn test_curve25519_scalar_to_i32() {
#[test]
fn test_curve25519_scalar_to_i32_overflow() {
matches!(
i32::try_from(Curve25519Scalar::from(i32::MAX as i128 + 1)),
i32::try_from(Curve25519Scalar::from(i128::from(i32::MAX) + 1)),
Err(ScalarConversionError::Overflow { .. })
);
matches!(
i32::try_from(Curve25519Scalar::from(i32::MIN as i128 - 1)),
i32::try_from(Curve25519Scalar::from(i128::from(i32::MIN) - 1)),
Err(ScalarConversionError::Overflow { .. })
);
}
Expand All @@ -225,11 +225,11 @@ fn test_curve25519_scalar_to_i64() {
#[test]
fn test_curve25519_scalar_to_i64_overflow() {
matches!(
i64::try_from(Curve25519Scalar::from(i64::MAX as i128 + 1)),
i64::try_from(Curve25519Scalar::from(i128::from(i64::MAX) + 1)),
Err(ScalarConversionError::Overflow { .. })
);
matches!(
i64::try_from(Curve25519Scalar::from(i64::MIN as i128 - 1)),
i64::try_from(Curve25519Scalar::from(i128::from(i64::MIN) - 1)),
Err(ScalarConversionError::Overflow { .. })
);
}
Expand Down
14 changes: 7 additions & 7 deletions crates/proof-of-sql/src/base/slice_ops/slice_cast_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ use curve25519_dalek::scalar::Scalar;
fn test_slice_map_to_vec() {
let a: Vec<u32> = vec![1, 2, 3, 4];
let b: Vec<u64> = vec![1, 2, 3, 4];
let a: Vec<u64> = slice_cast_with(&a, |&x| x as u64);
let a: Vec<u64> = slice_cast_with(&a, |&x| u64::from(x));
assert_eq!(a, b);
}

Expand Down Expand Up @@ -34,8 +34,8 @@ fn test_slice_cast_with_random() {
use rand::Rng;
let mut rng = rand::thread_rng();
let a: Vec<u32> = (0..100).map(|_| rng.gen()).collect();
let b: Vec<u64> = a.iter().map(|&x| x as u64).collect();
let a: Vec<u64> = slice_cast_with(&a, |&x| x as u64);
let b: Vec<u64> = a.iter().map(|&x| u64::from(x)).collect();
let a: Vec<u64> = slice_cast_with(&a, |&x| u64::from(x));
assert_eq!(a, b);
}

Expand Down Expand Up @@ -66,8 +66,8 @@ fn test_slice_cast_random_from_integer_to_curve25519scalar() {
fn test_slice_cast_mut() {
let a: Vec<u32> = vec![1, 2, 3, 4];
let mut b: Vec<u64> = vec![0, 0, 0, 0];
slice_cast_mut_with(&a, &mut b, |&x| x as u64);
assert_eq!(b, slice_cast_with(&a, |&x| x as u64));
slice_cast_mut_with(&a, &mut b, |&x| u64::from(x));
assert_eq!(b, slice_cast_with(&a, |&x| u64::from(x)));
}

/// random test for [`slice_cast_mut_with`]
Expand All @@ -77,8 +77,8 @@ fn test_slice_cast_mut_with_random() {
let mut rng = rand::thread_rng();
let a: Vec<u32> = (0..100).map(|_| rng.gen()).collect();
let mut b: Vec<u64> = vec![0; 100];
slice_cast_mut_with(&a, &mut b, |&x| x as u64);
assert_eq!(b, slice_cast_with(&a, |&x| x as u64));
slice_cast_mut_with(&a, &mut b, |&x| u64::from(x));
assert_eq!(b, slice_cast_with(&a, |&x| u64::from(x)));
}

/// random test for [`slice_cast_mut_with`]
Expand Down
Loading

0 comments on commit 6237221

Please sign in to comment.