From ed5364379684aa42c17fc871d3081cf4d1e86a6c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jordan=20Fr=C3=A9ry?=
Date: Fri, 30 Jun 2023 14:50:00 +0200
Subject: [PATCH] chore: enable rounding in FHE

---
 src/concrete/ml/quantization/quantized_module.py | 13 -------------
 tests/quantization/test_quantized_module.py      |  9 ++++-----
 tests/torch/test_compile_torch.py                |  3 ---
 3 files changed, 4 insertions(+), 21 deletions(-)

diff --git a/src/concrete/ml/quantization/quantized_module.py b/src/concrete/ml/quantization/quantized_module.py
index 89b9e9f5e..9a0aa011a 100644
--- a/src/concrete/ml/quantization/quantized_module.py
+++ b/src/concrete/ml/quantization/quantized_module.py
@@ -470,19 +470,6 @@ def _fhe_forward(self, *q_x: numpy.ndarray, simulate: bool = True) -> numpy.ndar
             "executing it in FHE.",
         )
 
-        # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/2888
-        # Check if rounding is being executed in FHE, which is not currently optimized
-        # Assert that there are rounding nodes in the circuit graph
-        rounding_nodes = self.fhe_circuit.graph.query_nodes(  # type: ignore[union-attr]
-            operation_filter="round_bit_pattern"
-        )
-
-        assert_true(
-            not rounding_nodes or simulate,
-            "Rounding is not currently optimized for execution in FHE. "
-            "Only simulation is allowed with a rounding operator.",
-        )
-
         results_cnp_circuit_list = []
         for i in range(q_x[0].shape[0]):
diff --git a/tests/quantization/test_quantized_module.py b/tests/quantization/test_quantized_module.py
index e44dab0d8..b5dbc2fa5 100644
--- a/tests/quantization/test_quantized_module.py
+++ b/tests/quantization/test_quantized_module.py
@@ -343,11 +343,10 @@ def test_quantized_module_rounding_fhe(model_class, input_shape, default_configu
     # Run quantized_model in simulation
     quantized_model.forward(numpy_test, fhe="simulate")
 
-    # Try to execute the model with rounding in FHE execution mode
-    with pytest.raises(
-        AssertionError, match="Rounding is not currently optimized for execution in FHE"
-    ):
-        quantized_model.forward(numpy_test, fhe="execute")
+    # Execute the model with rounding in FHE execution mode
+    quantized_model.forward(numpy_test, fhe="execute")
+
+    # TODO: https://github.com/zama-ai/concrete-ml-internal/issues/3800
 
 
 def quantized_module_predictions_are_equal(
diff --git a/tests/torch/test_compile_torch.py b/tests/torch/test_compile_torch.py
index 475220e28..687219b4c 100644
--- a/tests/torch/test_compile_torch.py
+++ b/tests/torch/test_compile_torch.py
@@ -304,8 +304,6 @@ def accuracy_test_rounding(
     quantized_numpy_module_round_low_precision and quantized_numpy_module making sure that
     the rounding feature has the expected behavior on the model accuracy.
     """
-    # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/2888
-    assert simulate, "Rounding is not available in FHE yet."
 
     # Check that the maximum_integer_bit_width is at least 4 bits to compare the rounding
     # feature with enough precision.
@@ -393,7 +391,6 @@ def accuracy_test_rounding(
     q_x = tuple(q[[i]] for q in to_tuple(qtest))
 
     # encrypt, run, and decrypt with different precision modes
-    # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/2888
    q_result = quantized_numpy_module.quantized_forward(*q_x, fhe="simulate")
    q_result_high_precision = quantized_numpy_module_round_high_precision.quantized_forward(
        *q_x,
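
For context, here is a minimal sketch of the behavior this patch unlocks: a model
compiled with rounding can now be run with fhe="execute", not only fhe="simulate".
This sketch assumes Concrete ML's compile_torch_model entry point and its
rounding_threshold_bits argument; the TinyMLP model, the shapes, and the 6-bit
rounding threshold are illustrative assumptions, not part of the patch.

    import numpy
    import torch

    from concrete.ml.torch.compile import compile_torch_model


    class TinyMLP(torch.nn.Module):
        """Hypothetical two-layer network, used only for illustration."""

        def __init__(self):
            super().__init__()
            self.fc1 = torch.nn.Linear(10, 32)
            self.fc2 = torch.nn.Linear(32, 2)

        def forward(self, x):
            return self.fc2(torch.relu(self.fc1(x)))


    # Representative calibration data for compilation
    inputset = torch.randn(100, 10)

    # rounding_threshold_bits inserts round_bit_pattern nodes into the circuit;
    # before this patch, _fhe_forward rejected such circuits unless simulating
    quantized_module = compile_torch_model(
        TinyMLP(), inputset, rounding_threshold_bits=6
    )

    x = numpy.random.randn(1, 10).astype(numpy.float32)
    y_simulated = quantized_module.forward(x, fhe="simulate")
    y_fhe = quantized_module.forward(x, fhe="execute")  # now allowed with rounding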