From 09dd6e420b713419041b78e3550585042db63b17 Mon Sep 17 00:00:00 2001 From: Zama Bot <64638994+zama-bot@users.noreply.github.com> Date: Sat, 28 Sep 2024 11:35:03 +0200 Subject: [PATCH] Prepare release 1.7.0 (#897) Co-authored-by: andrei-stoian-zama <95410270+andrei-stoian-zama@users.noreply.github.com> --- docs/references/api/README.md | 21 +- ...oncrete.ml.common.serialization.decoder.md | 6 +- ...oncrete.ml.common.serialization.encoder.md | 10 +- .../api/concrete.ml.common.utils.md | 119 ++++- ...oncrete.ml.deployment.fhe_client_server.md | 32 +- .../api/concrete.ml.onnx.convert.md | 29 +- docs/references/api/concrete.ml.onnx.md | 2 +- ...ncrete.ml.onnx.onnx_model_manipulations.md | 24 + .../api/concrete.ml.onnx.onnx_utils.md | 30 +- .../api/concrete.ml.onnx.ops_impl.md | 14 +- .../api/concrete.ml.pytest.torch_models.md | 114 ++++ .../api/concrete.ml.pytest.utils.md | 4 +- ...crete.ml.quantization.base_quantized_op.md | 72 +-- .../concrete.ml.quantization.post_training.md | 4 +- ...ncrete.ml.quantization.quantized_module.md | 59 ++- .../concrete.ml.quantization.quantized_ops.md | 210 ++++++-- .../concrete.ml.quantization.quantizers.md | 96 ++-- .../api/concrete.ml.sklearn.base.md | 495 +++++++++--------- .../api/concrete.ml.sklearn.linear_model.md | 54 +- .../api/concrete.ml.sklearn.tree_to_numpy.md | 6 +- .../references/api/concrete.ml.sklearn.xgb.md | 12 +- .../api/concrete.ml.torch.compile.md | 18 +- .../api/concrete.ml.torch.hybrid_model.md | 87 +-- docs/references/api/concrete.ml.torch.lora.md | 312 +++++++++++ .../api/concrete.ml.torch.numpy_module.md | 34 +- pyproject.toml | 2 +- src/concrete/ml/version.py | 2 +- use_case_examples/resnet/README.md | 3 +- 28 files changed, 1318 insertions(+), 553 deletions(-) create mode 100644 docs/references/api/concrete.ml.torch.lora.md diff --git a/docs/references/api/README.md b/docs/references/api/README.md index 0922eac71..6a9698985 100644 --- a/docs/references/api/README.md +++ b/docs/references/api/README.md @@ -52,6 +52,7 @@ - [`concrete.ml.torch`](./concrete.ml.torch.md#module-concretemltorch): Modules for torch to numpy conversion. - [`concrete.ml.torch.compile`](./concrete.ml.torch.compile.md#module-concretemltorchcompile): torch compilation function. - [`concrete.ml.torch.hybrid_model`](./concrete.ml.torch.hybrid_model.md#module-concretemltorchhybrid_model): Implement the conversion of a torch model to a hybrid fhe/torch inference. +- [`concrete.ml.torch.lora`](./concrete.ml.torch.lora.md#module-concretemltorchlora): This module contains classes for LoRA (Low-Rank Adaptation) training and custom layers. - [`concrete.ml.torch.numpy_module`](./concrete.ml.torch.numpy_module.md#module-concretemltorchnumpy_module): A torch to numpy module. - [`concrete.ml.version`](./concrete.ml.version.md#module-concretemlversion): File to manage the version of the package. @@ -79,6 +80,7 @@ - [`torch_models.ConcatFancyIndexing`](./concrete.ml.pytest.torch_models.md#class-concatfancyindexing): Concat with fancy indexing. - [`torch_models.Conv1dModel`](./concrete.ml.pytest.torch_models.md#class-conv1dmodel): Small model that uses a 1D convolution operator. - [`torch_models.DoubleQuantQATMixNet`](./concrete.ml.pytest.torch_models.md#class-doublequantqatmixnet): Torch model that with two different quantizers on the input. +- [`torch_models.EmbeddingModel`](./concrete.ml.pytest.torch_models.md#class-embeddingmodel): A torch model with an embedding layer. 
- [`torch_models.EncryptedMatrixMultiplicationModel`](./concrete.ml.pytest.torch_models.md#class-encryptedmatrixmultiplicationmodel): PyTorch module for performing matrix multiplication between two encrypted values. - [`torch_models.ExpandModel`](./concrete.ml.pytest.torch_models.md#class-expandmodel): Minimalist network that expands the input tensor to a larger size. - [`torch_models.FC`](./concrete.ml.pytest.torch_models.md#class-fc): Torch model for the tests. @@ -108,6 +110,8 @@ - [`torch_models.TinyCNN`](./concrete.ml.pytest.torch_models.md#class-tinycnn): A very small CNN. - [`torch_models.TinyQATCNN`](./concrete.ml.pytest.torch_models.md#class-tinyqatcnn): A very small QAT CNN to classify the sklearn digits data-set. - [`torch_models.TorchCustomModel`](./concrete.ml.pytest.torch_models.md#class-torchcustommodel): A small network with Brevitas, trained on make_classification. +- [`torch_models.TorchDivide`](./concrete.ml.pytest.torch_models.md#class-torchdivide): Torch model that performs a encrypted division between two inputs. +- [`torch_models.TorchMultiply`](./concrete.ml.pytest.torch_models.md#class-torchmultiply): Torch model that performs a encrypted multiplication between two inputs. - [`torch_models.TorchSum`](./concrete.ml.pytest.torch_models.md#class-torchsum): Torch model to test the ReduceSum ONNX operator in a leveled circuit. - [`torch_models.UnivariateModule`](./concrete.ml.pytest.torch_models.md#class-univariatemodule): Torch model that calls univariate and shape functions of torch. - [`base_quantized_op.QuantizedMixingOp`](./concrete.ml.quantization.base_quantized_op.md#class-quantizedmixingop): An operator that mixes (adds or multiplies) together encrypted inputs. @@ -132,7 +136,7 @@ - [`quantized_ops.QuantizedClip`](./concrete.ml.quantization.quantized_ops.md#class-quantizedclip): Quantized clip op. - [`quantized_ops.QuantizedConcat`](./concrete.ml.quantization.quantized_ops.md#class-quantizedconcat): Concatenate operator. - [`quantized_ops.QuantizedConv`](./concrete.ml.quantization.quantized_ops.md#class-quantizedconv): Quantized Conv op. -- [`quantized_ops.QuantizedDiv`](./concrete.ml.quantization.quantized_ops.md#class-quantizeddiv): Div operator /. +- [`quantized_ops.QuantizedDiv`](./concrete.ml.quantization.quantized_ops.md#class-quantizeddiv): Quantized Division operator. - [`quantized_ops.QuantizedElu`](./concrete.ml.quantization.quantized_ops.md#class-quantizedelu): Quantized Elu op. - [`quantized_ops.QuantizedEqual`](./concrete.ml.quantization.quantized_ops.md#class-quantizedequal): Comparison operator ==. - [`quantized_ops.QuantizedErf`](./concrete.ml.quantization.quantized_ops.md#class-quantizederf): Quantized erf op. @@ -154,7 +158,7 @@ - [`quantized_ops.QuantizedMax`](./concrete.ml.quantization.quantized_ops.md#class-quantizedmax): Quantized Max op. - [`quantized_ops.QuantizedMaxPool`](./concrete.ml.quantization.quantized_ops.md#class-quantizedmaxpool): Quantized Max Pooling op. - [`quantized_ops.QuantizedMin`](./concrete.ml.quantization.quantized_ops.md#class-quantizedmin): Quantized Min op. -- [`quantized_ops.QuantizedMul`](./concrete.ml.quantization.quantized_ops.md#class-quantizedmul): Multiplication operator. +- [`quantized_ops.QuantizedMul`](./concrete.ml.quantization.quantized_ops.md#class-quantizedmul): Quantized Multiplication operator. - [`quantized_ops.QuantizedNeg`](./concrete.ml.quantization.quantized_ops.md#class-quantizedneg): Quantized Neg op. 
- [`quantized_ops.QuantizedNot`](./concrete.ml.quantization.quantized_ops.md#class-quantizednot): Quantized Not op. - [`quantized_ops.QuantizedOr`](./concrete.ml.quantization.quantized_ops.md#class-quantizedor): Or operator ||. @@ -222,6 +226,11 @@ - [`hybrid_model.HybridFHEModelServer`](./concrete.ml.torch.hybrid_model.md#class-hybridfhemodelserver): Hybrid FHE Model Server. - [`hybrid_model.LoggerStub`](./concrete.ml.torch.hybrid_model.md#class-loggerstub): Placeholder type for a typical logger like the one from loguru. - [`hybrid_model.RemoteModule`](./concrete.ml.torch.hybrid_model.md#class-remotemodule): A wrapper class for the modules to be evaluated remotely with FHE. +- [`lora.BackwardModuleLinear`](./concrete.ml.torch.lora.md#class-backwardmodulelinear): Backward module for linear layers. +- [`lora.CustomLinear`](./concrete.ml.torch.lora.md#class-customlinear): Custom linear module. +- [`lora.ForwardBackwardModule`](./concrete.ml.torch.lora.md#class-forwardbackwardmodule): Custom autograd function for forward and backward passes. +- [`lora.ForwardModuleLinear`](./concrete.ml.torch.lora.md#class-forwardmodulelinear): Forward module for linear layers. +- [`lora.LoraTraining`](./concrete.ml.torch.lora.md#class-loratraining): LoraTraining module for fine-tuning with LoRA in a hybrid model setting. - [`numpy_module.NumpyModule`](./concrete.ml.torch.numpy_module.md#class-numpymodule): General interface to transform a torch.nn.Module to numpy module. ## Functions @@ -242,7 +251,10 @@ - [`utils.all_values_are_integers`](./concrete.ml.common.utils.md#function-all_values_are_integers): Indicate if all unpacked values are of a supported integer dtype. - [`utils.all_values_are_of_dtype`](./concrete.ml.common.utils.md#function-all_values_are_of_dtype): Indicate if all unpacked values are of the specified dtype(s). - [`utils.array_allclose_and_same_shape`](./concrete.ml.common.utils.md#function-array_allclose_and_same_shape): Check if two numpy arrays are equal within a tolerances and have the same shape. +- [`utils.check_compilation_device_is_valid_and_is_cuda`](./concrete.ml.common.utils.md#function-check_compilation_device_is_valid_and_is_cuda): Check whether the device string for compilation or FHE execution is CUDA or CPU. +- [`utils.check_device_is_valid`](./concrete.ml.common.utils.md#function-check_device_is_valid): Check whether the device string is valid or raise an exception. - [`utils.check_dtype_and_cast`](./concrete.ml.common.utils.md#function-check_dtype_and_cast): Convert any allowed type into an array and cast it if required. +- [`utils.check_execution_device_is_valid_and_is_cuda`](./concrete.ml.common.utils.md#function-check_execution_device_is_valid_and_is_cuda): Check whether the circuit can be executed on the required device. - [`utils.check_there_is_no_p_error_options_in_configuration`](./concrete.ml.common.utils.md#function-check_there_is_no_p_error_options_in_configuration): Check the user did not set p_error or global_p_error in configuration. - [`utils.compute_bits_precision`](./concrete.ml.common.utils.md#function-compute_bits_precision): Compute the number of bits required to represent x. - [`utils.generate_proxy_function`](./concrete.ml.common.utils.md#function-generate_proxy_function): Generate a proxy function for a function accepting only \*args type arguments. 
@@ -265,7 +277,7 @@ - [`convert.get_equivalent_numpy_forward_from_onnx`](./concrete.ml.onnx.convert.md#function-get_equivalent_numpy_forward_from_onnx): Get the numpy equivalent forward of the provided ONNX model. - [`convert.get_equivalent_numpy_forward_from_onnx_tree`](./concrete.ml.onnx.convert.md#function-get_equivalent_numpy_forward_from_onnx_tree): Get the numpy equivalent forward of the provided ONNX model for tree-based models only. - [`convert.get_equivalent_numpy_forward_from_torch`](./concrete.ml.onnx.convert.md#function-get_equivalent_numpy_forward_from_torch): Get the numpy equivalent forward of the provided torch Module. -- [`convert.preprocess_onnx_model`](./concrete.ml.onnx.convert.md#function-preprocess_onnx_model): Get the numpy equivalent forward of the provided ONNX model. +- [`convert.preprocess_onnx_model`](./concrete.ml.onnx.convert.md#function-preprocess_onnx_model): Preprocess the ONNX model to be used for numpy execution. - [`onnx_impl_utils.compute_conv_output_dims`](./concrete.ml.onnx.onnx_impl_utils.md#function-compute_conv_output_dims): Compute the output shape of a pool or conv operation. - [`onnx_impl_utils.compute_onnx_pool_padding`](./concrete.ml.onnx.onnx_impl_utils.md#function-compute_onnx_pool_padding): Compute any additional padding needed to compute pooling layers. - [`onnx_impl_utils.numpy_onnx_pad`](./concrete.ml.onnx.onnx_impl_utils.md#function-numpy_onnx_pad): Pad a tensor according to ONNX spec, using an optional custom pad value. @@ -273,11 +285,13 @@ - [`onnx_impl_utils.rounded_comparison`](./concrete.ml.onnx.onnx_impl_utils.md#function-rounded_comparison): Comparison operation using `round_bit_pattern` function. - [`onnx_model_manipulations.clean_graph_after_node_op_type`](./concrete.ml.onnx.onnx_model_manipulations.md#function-clean_graph_after_node_op_type): Remove the nodes following first node matching node_op_type from the ONNX graph. - [`onnx_model_manipulations.clean_graph_at_node_op_type`](./concrete.ml.onnx.onnx_model_manipulations.md#function-clean_graph_at_node_op_type): Remove the first node matching node_op_type and its following nodes from the ONNX graph. +- [`onnx_model_manipulations.convert_first_gather_to_matmul`](./concrete.ml.onnx.onnx_model_manipulations.md#function-convert_first_gather_to_matmul): Convert the first Gather node to a matrix multiplication node. - [`onnx_model_manipulations.keep_following_outputs_discard_others`](./concrete.ml.onnx.onnx_model_manipulations.md#function-keep_following_outputs_discard_others): Keep the outputs given in outputs_to_keep and remove the others from the model. - [`onnx_model_manipulations.remove_identity_nodes`](./concrete.ml.onnx.onnx_model_manipulations.md#function-remove_identity_nodes): Remove identity nodes from a model. - [`onnx_model_manipulations.remove_node_types`](./concrete.ml.onnx.onnx_model_manipulations.md#function-remove_node_types): Remove unnecessary nodes from the ONNX graph. - [`onnx_model_manipulations.remove_unused_constant_nodes`](./concrete.ml.onnx.onnx_model_manipulations.md#function-remove_unused_constant_nodes): Remove unused Constant nodes in the provided onnx model. - [`onnx_model_manipulations.simplify_onnx_model`](./concrete.ml.onnx.onnx_model_manipulations.md#function-simplify_onnx_model): Simplify an ONNX model, removes unused Constant nodes and Identity nodes. +- [`onnx_utils.check_onnx_model`](./concrete.ml.onnx.onnx_utils.md#function-check_onnx_model): Check an ONNX model, handling large models (>2GB) by using external data. 
- [`onnx_utils.execute_onnx_with_numpy`](./concrete.ml.onnx.onnx_utils.md#function-execute_onnx_with_numpy): Execute the provided ONNX graph on the given inputs. - [`onnx_utils.execute_onnx_with_numpy_trees`](./concrete.ml.onnx.onnx_utils.md#function-execute_onnx_with_numpy_trees): Execute the provided ONNX graph on the given inputs for tree-based models only. - [`onnx_utils.get_attribute`](./concrete.ml.onnx.onnx_utils.md#function-get_attribute): Get the attribute from an ONNX AttributeProto. @@ -390,3 +404,4 @@ - [`hybrid_model.convert_conv1d_to_linear`](./concrete.ml.torch.hybrid_model.md#function-convert_conv1d_to_linear): Convert all Conv1D layers in a module or a Conv1D layer itself to nn.Linear. - [`hybrid_model.tuple_to_underscore_str`](./concrete.ml.torch.hybrid_model.md#function-tuple_to_underscore_str): Convert a tuple to a string representation. - [`hybrid_model.underscore_str_to_tuple`](./concrete.ml.torch.hybrid_model.md#function-underscore_str_to_tuple): Convert a a string representation of a tuple to a tuple. +- [`lora.get_remote_names`](./concrete.ml.torch.lora.md#function-get_remote_names): Get names of modules to be executed remotely. diff --git a/docs/references/api/concrete.ml.common.serialization.decoder.md b/docs/references/api/concrete.ml.common.serialization.decoder.md index 5d1faba90..1a0144f11 100644 --- a/docs/references/api/concrete.ml.common.serialization.decoder.md +++ b/docs/references/api/concrete.ml.common.serialization.decoder.md @@ -16,7 +16,7 @@ Custom decoder for serialization. ______________________________________________________________________ - + ## function `object_hook` @@ -42,13 +42,13 @@ If the input's type is non-native, then we expect it to have the following forma ______________________________________________________________________ - + ## class `ConcreteDecoder` Custom json decoder to handle non-native types found in serialized Concrete ML objects. - + ### method `__init__` diff --git a/docs/references/api/concrete.ml.common.serialization.encoder.md b/docs/references/api/concrete.ml.common.serialization.encoder.md index 6f6033e3a..031e6f15d 100644 --- a/docs/references/api/concrete.ml.common.serialization.encoder.md +++ b/docs/references/api/concrete.ml.common.serialization.encoder.md @@ -13,7 +13,7 @@ Custom encoder for serialization. ______________________________________________________________________ - + ## function `dump_name_and_value` @@ -35,7 +35,7 @@ Dump the value into a custom dict format. ______________________________________________________________________ - + ## class `ConcreteEncoder` @@ -49,7 +49,7 @@ The ConcreteEncoder is only meant to encode Concrete-ML's built-in models and th ______________________________________________________________________ - + ### method `default` @@ -73,7 +73,7 @@ Define a custom default method that enables dumping any supported serialized val ______________________________________________________________________ - + ### method `isinstance` @@ -96,7 +96,7 @@ Natively, among other types, the JSONENcoder handles integers, floating points a ______________________________________________________________________ - + ### method `iterencode` diff --git a/docs/references/api/concrete.ml.common.utils.md b/docs/references/api/concrete.ml.common.utils.md index e60dde0f1..cdf9a0a69 100644 --- a/docs/references/api/concrete.ml.common.utils.md +++ b/docs/references/api/concrete.ml.common.utils.md @@ -11,13 +11,14 @@ Utils that can be re-used by other pieces of code in the module. 
- **SUPPORTED_FLOAT_TYPES** - **SUPPORTED_INT_TYPES** - **SUPPORTED_TYPES** +- **SUPPORTED_DEVICES** - **MAX_BITWIDTH_BACKWARD_COMPATIBLE** - **USE_OLD_VL** - **QUANT_ROUND_LIKE_ROUND_PBS** ______________________________________________________________________ - + ## function `replace_invalid_arg_name_chars` @@ -39,7 +40,7 @@ This does not check that the starting character of arg_name is valid. ______________________________________________________________________ - + ## function `generate_proxy_function` @@ -65,7 +66,7 @@ This returns a runtime compiled function with the sanitized argument names passe ______________________________________________________________________ - + ## function `get_onnx_opset_version` @@ -85,7 +86,7 @@ Return the ONNX opset_version. ______________________________________________________________________ - + ## function `manage_parameters_for_pbs_errors` @@ -122,7 +123,7 @@ Note that global_p_error is currently set to 0 in the FHE simulation mode. ______________________________________________________________________ - + ## function `check_there_is_no_p_error_options_in_configuration` @@ -140,7 +141,7 @@ It would be dangerous, since we set them in direct arguments in our calls to Con ______________________________________________________________________ - + ## function `get_model_class` @@ -159,7 +160,7 @@ The model's class. ______________________________________________________________________ - + ## function `is_model_class_in_a_list` @@ -179,7 +180,7 @@ If the model's class is in the list or not. ______________________________________________________________________ - + ## function `get_model_name` @@ -198,7 +199,7 @@ the model's name. ______________________________________________________________________ - + ## function `is_classifier_or_partial_classifier` @@ -218,7 +219,7 @@ Indicate if the model class represents a classifier. ______________________________________________________________________ - + ## function `is_regressor_or_partial_regressor` @@ -238,7 +239,7 @@ Indicate if the model class represents a regressor. ______________________________________________________________________ - + ## function `is_pandas_dataframe` @@ -260,7 +261,7 @@ This function is inspired from Scikit-Learn's test validation tools and avoids t ______________________________________________________________________ - + ## function `is_pandas_series` @@ -282,7 +283,7 @@ This function is inspired from Scikit-Learn's test validation tools and avoids t ______________________________________________________________________ - + ## function `is_pandas_type` @@ -302,7 +303,7 @@ Indicate if the input container is a Pandas DataFrame or Series. ______________________________________________________________________ - + ## function `check_dtype_and_cast` @@ -334,7 +335,7 @@ If values types don't match with any supported type or the expected dtype, raise ______________________________________________________________________ - + ## function `compute_bits_precision` @@ -354,7 +355,7 @@ Compute the number of bits required to represent x. ______________________________________________________________________ - + ## function `is_brevitas_model` @@ -374,7 +375,7 @@ Check if a model is a Brevitas type. ______________________________________________________________________ - + ## function `to_tuple` @@ -394,7 +395,7 @@ Make the input a tuple if it is not already the case. 
______________________________________________________________________ - + ## function `all_values_are_integers` @@ -414,7 +415,7 @@ Indicate if all unpacked values are of a supported integer dtype. ______________________________________________________________________ - + ## function `all_values_are_floats` @@ -434,7 +435,7 @@ Indicate if all unpacked values are of a supported float dtype. ______________________________________________________________________ - + ## function `all_values_are_of_dtype` @@ -460,7 +461,7 @@ Indicate if all unpacked values are of the specified dtype(s). ______________________________________________________________________ - + ## function `array_allclose_and_same_shape` @@ -490,7 +491,7 @@ Check if two numpy arrays are equal within a tolerances and have the same shape. ______________________________________________________________________ - + ## function `process_rounding_threshold_bits` @@ -516,7 +517,79 @@ Check and process the rounding_threshold_bits parameter. ______________________________________________________________________ - + + +## function `check_device_is_valid` + +```python +check_device_is_valid(device: str) → str +``` + +Check whether the device string is valid or raise an exception. + +**Args:** + +- `device` (str): the device string. Valid values are 'cpu', 'cuda' + +**Returns:** + +- `str`: the valid device string + +**Raises:** + +- `ValueError`: if the device string is incorrect + +______________________________________________________________________ + + + +## function `check_compilation_device_is_valid_and_is_cuda` + +```python +check_compilation_device_is_valid_and_is_cuda(device: str) → bool +``` + +Check whether the device string for compilation or FHE execution is CUDA or CPU. + +**Args:** + +- `device` (str): the device string. Valid values are 'cpu', 'cuda' + +**Returns:** + +- `bool`: whether GPU should be enabled for compilation + +**Raises:** + +- `ValueError`: if the device string is incorrect or if CUDA is not supported + +______________________________________________________________________ + + + +## function `check_execution_device_is_valid_and_is_cuda` + +```python +check_execution_device_is_valid_and_is_cuda( + is_compiled_for_cuda: bool, + fhe: Union[FheMode, str] +) → None +``` + +Check whether the circuit can be executed on the required device. + +**Args:** + +- `is_compiled_for_cuda` (bool): whether the circuit is compiled for CUDA +- `fhe` (Union\[FheMode, str\]): the execution mode of the circuit + +**Raises:** + +- `ValueError`: if the requested device is not available + +______________________________________________________________________ + + ## class `FheMode` diff --git a/docs/references/api/concrete.ml.deployment.fhe_client_server.md b/docs/references/api/concrete.ml.deployment.fhe_client_server.md index 569c520da..894a401f2 100644 --- a/docs/references/api/concrete.ml.deployment.fhe_client_server.md +++ b/docs/references/api/concrete.ml.deployment.fhe_client_server.md @@ -42,13 +42,13 @@ Mode for the FHE API. ______________________________________________________________________ - + ## class `FHEModelServer` Server API to load and run the FHE circuit. - + ### method `__init__` @@ -64,7 +64,7 @@ Initialize the FHE API. ______________________________________________________________________ - + ### method `load` @@ -76,7 +76,7 @@ Load the circuit. ______________________________________________________________________ - + ### method `run` @@ -100,13 +100,13 @@ Run the model on the server over encrypted data. 
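As a quick orientation before the `FHEModelDev` and `FHEModelClient` sections below, here is a hedged end-to-end sketch of how the three deployment classes work together. The `model` object, the input data `X` and the directory names are illustrative assumptions (any fitted and compiled Concrete ML estimator would do), not part of the documented API.

```python
# Illustrative dev/client/server round trip. `model` is assumed to be a fitted
# and compiled Concrete ML estimator and `X` its floating-point input data.
from concrete.ml.deployment import FHEModelClient, FHEModelDev, FHEModelServer

# Developer side: export the artifacts needed by the client and the server.
FHEModelDev(path_dir="deployment", model=model).save()

# Client side: generate keys, then quantize, encrypt and serialize one input.
client = FHEModelClient(path_dir="deployment", key_dir="keys")
client.generate_private_and_evaluation_keys()
evaluation_keys = client.get_serialized_evaluation_keys()
encrypted_input = client.quantize_encrypt_serialize(X[:1])

# Server side: load the circuit and run it on the encrypted payload.
server = FHEModelServer(path_dir="deployment")
server.load()
encrypted_result = server.run(encrypted_input, evaluation_keys)

# Back on the client: decrypt and de-quantize the result.
prediction = client.deserialize_decrypt_dequantize(encrypted_result)
```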
______________________________________________________________________ - + ## class `FHEModelDev` Dev API to save the model and then load and run the FHE circuit. - + ### method `__init__` @@ -123,14 +123,14 @@ Initialize the FHE API. ______________________________________________________________________ - + ### method `save` ```python save( mode: DeploymentMode = , - via_mlir: bool = False + via_mlir: bool = True ) ``` @@ -148,13 +148,13 @@ Export all needed artifacts for the client and server. ______________________________________________________________________ - + ## class `FHEModelClient` Client API to encrypt and decrypt FHE data. - + ### method `__init__` @@ -171,7 +171,7 @@ Initialize the FHE API. ______________________________________________________________________ - + ### method `deserialize_decrypt` @@ -193,7 +193,7 @@ Deserialize and decrypt the values. ______________________________________________________________________ - + ### method `deserialize_decrypt_dequantize` @@ -215,7 +215,7 @@ Deserialize, decrypt and de-quantize the values. ______________________________________________________________________ - + ### method `generate_private_and_evaluation_keys` @@ -231,7 +231,7 @@ Generate the private and evaluation keys. ______________________________________________________________________ - + ### method `get_serialized_evaluation_keys` @@ -247,7 +247,7 @@ Get the serialized evaluation keys. ______________________________________________________________________ - + ### method `load` @@ -259,7 +259,7 @@ Load the quantizers along with the FHE specs. ______________________________________________________________________ - + ### method `quantize_encrypt_serialize` diff --git a/docs/references/api/concrete.ml.onnx.convert.md b/docs/references/api/concrete.ml.onnx.convert.md index 11e3b4788..ed018a2ca 100644 --- a/docs/references/api/concrete.ml.onnx.convert.md +++ b/docs/references/api/concrete.ml.onnx.convert.md @@ -13,7 +13,7 @@ ONNX conversion related code. ______________________________________________________________________ - + ## function `fuse_matmul_bias_to_gemm` @@ -33,7 +33,7 @@ Fuse sequence of matmul -> add into a gemm node. ______________________________________________________________________ - + ## function `get_equivalent_numpy_forward_from_torch` @@ -42,7 +42,7 @@ get_equivalent_numpy_forward_from_torch( torch_module: Module, dummy_input: Union[Tensor, Tuple[Tensor, ]], output_onnx_file: Union[NoneType, Path, str] = None -) → Tuple[Callable[, Tuple[ndarray, ]], ModelProto] +) → Tuple[Callable[, Tuple[ndarray, ]], Union[ModelProto, NoneType], Callable[, Tuple[ndarray, ]], ModelProto] ``` Get the numpy equivalent forward of the provided torch Module. @@ -55,19 +55,22 @@ Get the numpy equivalent forward of the provided torch Module. **Returns:** -- `Tuple[Callable[..., Tuple[numpy.ndarray, ...]], onnx.GraphProto]`: The function that will execute the equivalent numpy code to the passed torch_module and the generated ONNX model. +- `ONNXAndNumpyForwards`: The function that will execute the equivalent numpy code to the passed torch_module and the generated ONNX model. ______________________________________________________________________ - + ## function `preprocess_onnx_model` ```python -preprocess_onnx_model(onnx_model: ModelProto, check_model: bool) → ModelProto +preprocess_onnx_model( + onnx_model: ModelProto, + check_model: bool +) → Tuple[Union[ModelProto, NoneType], ModelProto] ``` -Get the numpy equivalent forward of the provided ONNX model. 
+Preprocess the ONNX model to be used for numpy execution. **Args:** @@ -80,11 +83,11 @@ Get the numpy equivalent forward of the provided ONNX model. **Returns:** -- `onnx.ModelProto`: The preprocessed ONNX model. +- `Tuple[Optional[onnx.ModelProto], onnx.ModelProto]`: The preprocessing ONNX model and preprocessed ONNX model. The preprocessing model is None if there is no preprocessing required. ______________________________________________________________________ - + ## function `get_equivalent_numpy_forward_from_onnx` @@ -92,7 +95,7 @@ ______________________________________________________________________ get_equivalent_numpy_forward_from_onnx( onnx_model: ModelProto, check_model: bool = True -) → Tuple[Callable[, Tuple[ndarray, ]], ModelProto] +) → Tuple[Callable[, Tuple[ndarray, ]], Union[ModelProto, NoneType], Callable[, Tuple[ndarray, ]], ModelProto] ``` Get the numpy equivalent forward of the provided ONNX model. @@ -104,11 +107,11 @@ Get the numpy equivalent forward of the provided ONNX model. **Returns:** -- `Callable[..., Tuple[numpy.ndarray, ...]]`: The function that will execute the equivalent numpy function. +- `ONNXAndNumpyForwards`: The function that will execute the equivalent numpy function. ______________________________________________________________________ - + ## function `get_equivalent_numpy_forward_from_onnx_tree` @@ -130,4 +133,4 @@ Get the numpy equivalent forward of the provided ONNX model for tree-based model **Returns:** -- `Tuple[Callable[..., Tuple[numpy.ndarray, ...]], onnx.ModelProto]`: The function that will execute the equivalent numpy function. +- `Tuple[NumpyForwardCallable, onnx.ModelProto]`: The function that will execute the equivalent numpy function. diff --git a/docs/references/api/concrete.ml.onnx.md b/docs/references/api/concrete.ml.onnx.md index 039a6cc0e..3c2757afb 100644 --- a/docs/references/api/concrete.ml.onnx.md +++ b/docs/references/api/concrete.ml.onnx.md @@ -11,5 +11,5 @@ ONNX module. - **onnx_impl_utils** - **ops_impl** - **onnx_utils** -- **convert** - **onnx_model_manipulations** +- **convert** diff --git a/docs/references/api/concrete.ml.onnx.onnx_model_manipulations.md b/docs/references/api/concrete.ml.onnx.onnx_model_manipulations.md index cae6838ef..94124c003 100644 --- a/docs/references/api/concrete.ml.onnx.onnx_model_manipulations.md +++ b/docs/references/api/concrete.ml.onnx.onnx_model_manipulations.md @@ -146,3 +146,27 @@ Remove the nodes following first node matching node_op_type from the ONNX graph. **Raises:** - `ValueError`: If no node matched the given op_type and fail_if_not_found is set to True + +______________________________________________________________________ + + + +## function `convert_first_gather_to_matmul` + +```python +convert_first_gather_to_matmul( + onnx_model: ModelProto +) → Tuple[Union[ModelProto, NoneType], ModelProto] +``` + +Convert the first Gather node to a matrix multiplication node. + +In FHE, Gather is a costly operation since it can involve many PBS. When it appears first in the onnx model, we can remove it and replace it by a matrix multiplication node by converting the indices to a one-hot encoding. + +**Args:** + +- `onnx_model` (onnx.ModelProto): The onnx model. + +**Returns:** + +- `Tuple[Optional[onnx.ModelProto], onnx.ModelProto]`: The pre-processing model and the modified onnx model. 
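To make the one-hot trick behind `convert_first_gather_to_matmul` concrete, here is a small self-contained numpy sketch of the arithmetic identity it relies on. This is not the library's implementation, only the equivalence that lets a leading Gather be replaced by a MatMul.

```python
import numpy as np

# An embedding-style Gather: pick rows of `table` at integer `indices`.
table = np.arange(12, dtype=np.float64).reshape(4, 3)  # 4 embeddings of size 3
indices = np.array([2, 0, 3])
gathered = table[indices]  # what the Gather node computes

# The same result as a matrix multiplication with a one-hot encoding of the
# indices, which avoids the PBS-heavy Gather in FHE.
one_hot = np.eye(table.shape[0])[indices]  # shape (3, 4)
via_matmul = one_hot @ table

assert np.array_equal(gathered, via_matmul)
```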
diff --git a/docs/references/api/concrete.ml.onnx.onnx_utils.md b/docs/references/api/concrete.ml.onnx.onnx_utils.md index 946b81b8b..fa752f700 100644 --- a/docs/references/api/concrete.ml.onnx.onnx_utils.md +++ b/docs/references/api/concrete.ml.onnx.onnx_utils.md @@ -19,7 +19,7 @@ Utils to interpret an ONNX model with numpy. ______________________________________________________________________ - + ## function `get_attribute` @@ -39,7 +39,7 @@ Get the attribute from an ONNX AttributeProto. ______________________________________________________________________ - + ## function `get_op_type` @@ -59,7 +59,7 @@ Construct the qualified type name of the ONNX operator. ______________________________________________________________________ - + ## function `execute_onnx_with_numpy` @@ -80,7 +80,7 @@ Execute the provided ONNX graph on the given inputs. ______________________________________________________________________ - + ## function `execute_onnx_with_numpy_trees` @@ -106,7 +106,7 @@ Execute the provided ONNX graph on the given inputs for tree-based models only. ______________________________________________________________________ - + ## function `remove_initializer_from_input` @@ -125,3 +125,23 @@ In some cases, ONNX initializers may appear, erroneously, as graph inputs. This **Returns:** - `onnx.ModelProto`: the cleaned model + +______________________________________________________________________ + + + +## function `check_onnx_model` + +```python +check_onnx_model(onnx_model: ModelProto) → None +``` + +Check an ONNX model, handling large models (>2GB) by using external data. + +**Args:** + +- `onnx_model` (onnx.ModelProto): The ONNX model to check. + +**Raises:** + +- `ValueError`: If the model is too large (>2GB) or if there's another ValueError. diff --git a/docs/references/api/concrete.ml.onnx.ops_impl.md b/docs/references/api/concrete.ml.onnx.ops_impl.md index 0ceb72791..9323e9bd2 100644 --- a/docs/references/api/concrete.ml.onnx.ops_impl.md +++ b/docs/references/api/concrete.ml.onnx.ops_impl.md @@ -1585,7 +1585,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Pow-13 ______________________________________________________________________ - + ## function `numpy_floor` @@ -1607,7 +1607,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Floor-1 ______________________________________________________________________ - + ## function `numpy_max` @@ -1632,7 +1632,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Max-1 ______________________________________________________________________ - + ## function `numpy_min` @@ -1657,7 +1657,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Max-1 ______________________________________________________________________ - + ## function `numpy_sign` @@ -1679,7 +1679,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Sign-9 ______________________________________________________________________ - + ## function `numpy_neg` @@ -1701,7 +1701,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Sign-9 ______________________________________________________________________ - + ## function `numpy_concatenate` @@ -1724,7 +1724,7 @@ See https://github.com/onnx/onnx/blob/main/docs/Changelog.md#concat-13 ______________________________________________________________________ - + ## function `numpy_unfold` diff --git a/docs/references/api/concrete.ml.pytest.torch_models.md b/docs/references/api/concrete.ml.pytest.torch_models.md index 55c5a540c..36e7bb856 100644 --- 
a/docs/references/api/concrete.ml.pytest.torch_models.md +++ b/docs/references/api/concrete.ml.pytest.torch_models.md @@ -1623,3 +1623,117 @@ Forward pass. **Returns:** - `Tuple[torch.Tensor. torch.Tensor]`: Outputs of the network. + +______________________________________________________________________ + + + +## class `TorchDivide` + +Torch model that performs a encrypted division between two inputs. + + + +### method `__init__` + +```python +__init__(input_output, activation_function) +``` + +______________________________________________________________________ + + + +### method `forward` + +```python +forward(x, y) +``` + +Forward pass. + +**Args:** + +- `x` (torch.Tensor): The first input tensor. +- `y` (torch.Tensor): The second input tensor. + +**Returns:** + +- `torch.Tensor`: The result of the division. + +______________________________________________________________________ + + + +## class `TorchMultiply` + +Torch model that performs a encrypted multiplication between two inputs. + + + +### method `__init__` + +```python +__init__(input_output, activation_function) +``` + +______________________________________________________________________ + + + +### method `forward` + +```python +forward(x, y) +``` + +Forward pass. + +**Args:** + +- `x` (torch.Tensor): The first input tensor. +- `y` (torch.Tensor): The second input tensor. + +**Returns:** + +- `torch.Tensor`: The result of the multiplication. + +______________________________________________________________________ + + + +## class `EmbeddingModel` + +A torch model with an embedding layer. + + + +### method `__init__` + +```python +__init__( + num_embeddings, + embedding_dim, + activation_function= +) +``` + +______________________________________________________________________ + + + +### method `forward` + +```python +forward(x) +``` + +Forward pass. + +**Args:** + +- `x` (torch.Tensor): The input tensor containing indices. + +**Returns:** + +- `torch.Tensor`: The output tensor after embedding. diff --git a/docs/references/api/concrete.ml.pytest.utils.md b/docs/references/api/concrete.ml.pytest.utils.md index ae87301c8..eb4c64d9f 100644 --- a/docs/references/api/concrete.ml.pytest.utils.md +++ b/docs/references/api/concrete.ml.pytest.utils.md @@ -289,7 +289,7 @@ This function serializes all objects using the `dump`, `dumps`, `load` and `load ______________________________________________________________________ - + ## function `get_random_samples` @@ -314,7 +314,7 @@ Select `n_sample` random elements from a 2D NumPy array. ______________________________________________________________________ - + ## function `pandas_dataframe_are_equal` diff --git a/docs/references/api/concrete.ml.quantization.base_quantized_op.md b/docs/references/api/concrete.ml.quantization.base_quantized_op.md index 40365fb39..09ab25a72 100644 --- a/docs/references/api/concrete.ml.quantization.base_quantized_op.md +++ b/docs/references/api/concrete.ml.quantization.base_quantized_op.md @@ -56,7 +56,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `calibrate` @@ -76,7 +76,7 @@ Create corresponding QuantizedArray for the output of the activation function. ______________________________________________________________________ - + ### method `call_impl` @@ -97,7 +97,7 @@ Call self.impl to centralize mypy bug workaround. 
______________________________________________________________________ - + ### method `can_fuse` @@ -115,7 +115,7 @@ This function shall be overloaded by inheriting classes to test self.\_int_input ______________________________________________________________________ - + ### method `dump` @@ -147,7 +147,7 @@ Dump itself to a dict. ______________________________________________________________________ - + ### method `dumps` @@ -163,7 +163,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `load_dict` @@ -183,7 +183,7 @@ Load itself from a string. ______________________________________________________________________ - + ### classmethod `must_quantize_input` @@ -205,7 +205,7 @@ Quantized ops and numpy onnx ops take inputs and attributes. Inputs can be eithe ______________________________________________________________________ - + ### classmethod `op_type` @@ -221,7 +221,7 @@ Get the type of this operation. ______________________________________________________________________ - + ### method `prepare_output` @@ -243,7 +243,7 @@ The calibrate method needs to be called with sample data before using this funct ______________________________________________________________________ - + ### method `q_impl` @@ -267,7 +267,7 @@ Execute the quantized forward. ______________________________________________________________________ - + ## class `QuantizedOpUnivariateOfEncrypted` @@ -275,7 +275,7 @@ An univariate operator of an encrypted value. This operation is not really operating as a quantized operation. It is useful when the computations get fused into a TLU, as in e.g., Act(x) = x || (x + 42)). - + ### method `__init__` @@ -302,7 +302,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `calibrate` @@ -322,7 +322,7 @@ Create corresponding QuantizedArray for the output of the activation function. ______________________________________________________________________ - + ### method `call_impl` @@ -343,7 +343,7 @@ Call self.impl to centralize mypy bug workaround. ______________________________________________________________________ - + ### method `can_fuse` @@ -361,7 +361,7 @@ This operation can be fused and computed in float when a single integer tensor g ______________________________________________________________________ - + ### method `dump` @@ -393,7 +393,7 @@ Dump itself to a dict. ______________________________________________________________________ - + ### method `dumps` @@ -409,7 +409,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `load_dict` @@ -429,7 +429,7 @@ Load itself from a string. ______________________________________________________________________ - + ### classmethod `must_quantize_input` @@ -451,7 +451,7 @@ Quantized ops and numpy onnx ops take inputs and attributes. Inputs can be eithe ______________________________________________________________________ - + ### classmethod `op_type` @@ -467,7 +467,7 @@ Get the type of this operation. ______________________________________________________________________ - + ### method `prepare_output` @@ -489,7 +489,7 @@ The calibrate method needs to be called with sample data before using this funct ______________________________________________________________________ - + ### method `q_impl` @@ -513,7 +513,7 @@ Execute the quantized forward. 
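As a side note on the fusion mentioned in the `QuantizedOpUnivariateOfEncrypted` docstring above, the following plain-numpy sketch shows why an expression that depends on a single encrypted integer input can be evaluated as one table lookup. The 4-bit unsigned range and the particular activation are illustrative assumptions, not the library's fusion code.

```python
import numpy as np

def act(x):
    # A univariate expression of one (encrypted) input, in the spirit of the
    # docstring example: it mixes x with a non-linear function of x.
    return x + np.maximum(x - 4.0, 0.0)

# For a 4-bit unsigned input, every possible value can be tabulated once...
input_range = np.arange(2**4)
table = act(input_range)

# ...so evaluation reduces to a lookup, which is how a fused TLU behaves in FHE.
x = np.array([0, 3, 7, 15])
assert np.array_equal(act(x), table[x])
```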
______________________________________________________________________ - + ## class `QuantizedMixingOp` @@ -521,7 +521,7 @@ An operator that mixes (adds or multiplies) together encrypted inputs. Mixing operators cannot be fused to TLUs. - + ### method `__init__` @@ -553,7 +553,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `calibrate` @@ -573,7 +573,7 @@ Create corresponding QuantizedArray for the output of the activation function. ______________________________________________________________________ - + ### method `call_impl` @@ -594,7 +594,7 @@ Call self.impl to centralize mypy bug workaround. ______________________________________________________________________ - + ### method `can_fuse` @@ -612,7 +612,7 @@ Mixing operations cannot be fused since it must be performed over integer tensor ______________________________________________________________________ - + ### method `cnp_round` @@ -638,7 +638,7 @@ Round the input array to the specified number of bits. ______________________________________________________________________ - + ### method `dump` @@ -670,7 +670,7 @@ Dump itself to a dict. ______________________________________________________________________ - + ### method `dumps` @@ -686,7 +686,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `load_dict` @@ -706,7 +706,7 @@ Load itself from a string. ______________________________________________________________________ - + ### method `make_output_quant_parameters` @@ -732,7 +732,7 @@ Build a quantized array from quantized integer results of the op and quantizatio ______________________________________________________________________ - + ### classmethod `must_quantize_input` @@ -754,7 +754,7 @@ Quantized ops and numpy onnx ops take inputs and attributes. Inputs can be eithe ______________________________________________________________________ - + ### classmethod `op_type` @@ -770,7 +770,7 @@ Get the type of this operation. ______________________________________________________________________ - + ### method `prepare_output` @@ -792,7 +792,7 @@ The calibrate method needs to be called with sample data before using this funct ______________________________________________________________________ - + ### method `q_impl` diff --git a/docs/references/api/concrete.ml.quantization.post_training.md b/docs/references/api/concrete.ml.quantization.post_training.md index 5db84f8ee..30fde1351 100644 --- a/docs/references/api/concrete.ml.quantization.post_training.md +++ b/docs/references/api/concrete.ml.quantization.post_training.md @@ -130,7 +130,7 @@ Following https://arxiv.org/abs/1712.05877 guidelines. ______________________________________________________________________ - + ## class `PostTrainingAffineQuantization` @@ -229,7 +229,7 @@ Following https://arxiv.org/abs/1712.05877 guidelines. ______________________________________________________________________ - + ## class `PostTrainingQATImporter` diff --git a/docs/references/api/concrete.ml.quantization.quantized_module.md b/docs/references/api/concrete.ml.quantization.quantized_module.md index bebd6f9e8..71275b531 100644 --- a/docs/references/api/concrete.ml.quantization.quantized_module.md +++ b/docs/references/api/concrete.ml.quantization.quantized_module.md @@ -14,13 +14,13 @@ QuantizedModule API. 
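Before the class reference below, a hedged usage sketch: a `QuantizedModule` is typically produced by `compile_torch_model` (see `concrete.ml.torch.compile` in the index) and then evaluated with `forward` in simulation or in FHE. The toy network, bit-widths and random input set are illustrative assumptions.

```python
import numpy as np
import torch

from concrete.ml.torch.compile import compile_torch_model

# A toy float network and a calibration input set, purely for illustration.
torch_model = torch.nn.Sequential(
    torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2)
)
inputset = np.random.uniform(-1, 1, size=(100, 4)).astype(np.float32)

# compile_torch_model quantizes the network and returns a QuantizedModule
# holding a compiled FHE circuit.
quantized_module = compile_torch_model(
    torch_model, inputset, n_bits=6, rounding_threshold_bits=6
)

x = inputset[:1]
y_simulated = quantized_module.forward(x, fhe="simulate")  # fast simulation
y_fhe = quantized_module.forward(x, fhe="execute")         # actual FHE run
```

Note that this release also adds a `device` argument to `QuantizedModule.compile` (documented below), so compilation can target CUDA when a GPU-enabled backend is available.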
______________________________________________________________________ - + ## class `QuantizedModule` Inference for a quantized model. - + ### method `__init__` @@ -29,7 +29,8 @@ __init__( ordered_module_input_names: Optional[Iterable[str]] = None, ordered_module_output_names: Optional[Iterable[str]] = None, quant_layers_dict: Optional[Dict[str, Tuple[Tuple[str, ], QuantizedOp]]] = None, - onnx_model: Optional[ModelProto] = None + onnx_model: Optional[ModelProto] = None, + onnx_preprocessing: Optional[ModelProto] = None ) ``` @@ -67,7 +68,7 @@ Get the post-processing parameters. ______________________________________________________________________ - + ### method `bitwidth_and_range_report` @@ -83,7 +84,7 @@ Report the ranges and bit-widths for layers that mix encrypted integer values. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -99,7 +100,7 @@ Check if the quantized module is compiled. ______________________________________________________________________ - + ### method `compile` @@ -112,7 +113,8 @@ compile( p_error: Optional[float] = None, global_p_error: Optional[float] = None, verbose: bool = False, - inputs_encryption_status: Optional[Sequence[str]] = None + inputs_encryption_status: Optional[Sequence[str]] = None, + device: str = 'cpu' ) → Circuit ``` @@ -128,6 +130,7 @@ Compile the module's forward function. - `global_p_error` (Optional\[float\]): Probability of error of the full circuit. A global_p_error value cannot be given if a p_error value is already set. This feature is not supported during simulation, meaning the probability is currently set to 0. Default to None, which sets this error to a default value. - `verbose` (bool): Indicate if compilation information should be printed during compilation. Default to False. - `inputs_encryption_status` (Optional\[Sequence\[str\]\]): encryption status ('clear', 'encrypted') for each input. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -139,7 +142,7 @@ Compile the module's forward function. ______________________________________________________________________ - + ### method `dequantize_output` @@ -159,7 +162,7 @@ Take the last layer q_out and use its de-quant function. ______________________________________________________________________ - + ### method `dump` @@ -175,7 +178,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -191,7 +194,7 @@ Dump itself to a dict. ______________________________________________________________________ - + ### method `dumps` @@ -207,7 +210,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `forward` @@ -235,7 +238,7 @@ This method executes the forward pass in the clear, with simulation or in FHE. I ______________________________________________________________________ - + ### method `load_dict` @@ -255,7 +258,7 @@ Load itself from a string. ______________________________________________________________________ - + ### method `post_processing` @@ -277,7 +280,27 @@ For quantized modules, there is no post-processing step but the method is kept t ______________________________________________________________________ - + + +### method `pre_processing` + +```python +pre_processing(*values: ndarray) → Tuple[ndarray, ] +``` + +Apply pre-processing to the input values. + +**Args:** + +- `values` (numpy.ndarray): The input values to pre-process. 
+ +**Returns:** + +- `Tuple[numpy.ndarray, ...]`: The pre-processed values. + +______________________________________________________________________ + + ### method `quantize_input` @@ -299,7 +322,7 @@ Take the inputs in fp32 and quantize it using the learned quantization parameter ______________________________________________________________________ - + ### method `quantized_forward` @@ -323,7 +346,7 @@ Forward function for the FHE circuit. ______________________________________________________________________ - + ### method `set_inputs_quantization_parameters` @@ -339,7 +362,7 @@ Set the quantization parameters for the module's inputs. ______________________________________________________________________ - + ### method `set_reduce_sum_copy` diff --git a/docs/references/api/concrete.ml.quantization.quantized_ops.md b/docs/references/api/concrete.ml.quantization.quantized_ops.md index e2e3bc371..1506aca1f 100644 --- a/docs/references/api/concrete.ml.quantization.quantized_ops.md +++ b/docs/references/api/concrete.ml.quantization.quantized_ops.md @@ -1027,9 +1027,21 @@ ______________________________________________________________________ ## class `QuantizedDiv` -Div operator /. +Quantized Division operator. -This operation is not really working as a quantized operation. It just works when things got fused, as in e.g., Act(x) = 1000 / (x + 42)) +Can divide either two variables (both encrypted) or a variable and a constant + + + +### method `__init__` + +```python +__init__( + *args, + rounding_threshold_bits: Union[NoneType, int, Dict[str, Union[str, int]]] = None, + **kwargs +) → None +``` ______________________________________________________________________ @@ -1043,13 +1055,65 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + + +### method `calibrate` + +```python +calibrate(*inputs: ndarray) → ndarray +``` + +Create corresponding QuantizedArray for the output of the activation function. + +**Args:** + +- `*inputs (numpy.ndarray)`: Calibration sample inputs. + +**Returns:** + +- `numpy.ndarray`: the output values for the provided calibration samples. + +______________________________________________________________________ + + + +### method `can_fuse` + +```python +can_fuse() → bool +``` + +Determine if this op can be fused. + +Div operation can be computed in float and fused if it operates over inputs produced by a single integer tensor. + +**Returns:** + +- `bool`: Whether the number of integer input tensors allows computing this op as a TLU + +______________________________________________________________________ + + + +### method `q_impl` + +```python +q_impl( + *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float], + calibrate_rounding: bool = False, + **attrs +) → Union[ndarray, QuantizedArray, NoneType, bool, int, float] +``` + +______________________________________________________________________ + + ## class `QuantizedMul` -Multiplication operator. +Quantized Multiplication operator. -Only multiplies an encrypted tensor with a float constant for now. This operation will be fused to a (potentially larger) TLU. +Can multiply either two variables (both encrypted) or a variable and a constant ______________________________________________________________________ @@ -1063,7 +1127,39 @@ Get the names of encrypted integer tensors that are used by this op. 
______________________________________________________________________ - + + +### method `can_fuse` + +```python +can_fuse() → bool +``` + +Determine if this op can be fused. + +Mul operation can be computed in float and fused if it operates over inputs produced by a single integer tensor. + +**Returns:** + +- `bool`: Whether the number of integer input tensors allows computing this op as a TLU + +______________________________________________________________________ + + + +### method `q_impl` + +```python +q_impl( + *q_inputs: Union[ndarray, QuantizedArray, NoneType, bool, int, float], + calibrate_rounding: bool = False, + **attrs +) → Union[ndarray, QuantizedArray, NoneType, bool, int, float] +``` + +______________________________________________________________________ + + ## class `QuantizedSub` @@ -1114,7 +1210,7 @@ q_impl( ______________________________________________________________________ - + ## class `QuantizedBatchNormalization` @@ -1132,7 +1228,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `calibrate` @@ -1152,7 +1248,7 @@ Create corresponding QuantizedArray for the output of the activation function. ______________________________________________________________________ - + ## class `QuantizedFlatten` @@ -1170,7 +1266,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `can_fuse` @@ -1188,7 +1284,7 @@ Flatten operation cannot be fused since it must be performed over integer tensor ______________________________________________________________________ - + ### method `q_impl` @@ -1212,13 +1308,13 @@ Flatten the input integer encrypted tensor. ______________________________________________________________________ - + ## class `QuantizedReduceSum` ReduceSum with encrypted input. - + ### method `__init__` @@ -1259,7 +1355,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `calibrate` @@ -1279,7 +1375,7 @@ Create corresponding QuantizedArray for the output of the activation function. ______________________________________________________________________ - + ### method `q_impl` @@ -1305,7 +1401,7 @@ Sum the encrypted tensor's values along the given axes. ______________________________________________________________________ - + ## class `QuantizedErf` @@ -1323,7 +1419,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ## class `QuantizedNot` @@ -1341,13 +1437,13 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ## class `QuantizedBrevitasQuant` Brevitas uniform quantization with encrypted input. - + ### method `__init__` @@ -1390,7 +1486,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `calibrate` @@ -1410,7 +1506,7 @@ Create corresponding QuantizedArray for the output of Quantization function. ______________________________________________________________________ - + ### method `q_impl` @@ -1434,7 +1530,7 @@ Quantize values. 
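To our understanding, `QuantizedBrevitasQuant` above is the operation that Brevitas quantizers map to when a quantization-aware-trained network is imported. A minimal sketch of such a network, assuming Brevitas is installed and using `compile_brevitas_qat_model` from `concrete.ml.torch.compile` (listed in the index); the layer sizes and bit-widths are illustrative.

```python
import brevitas.nn as qnn
import numpy as np
import torch

from concrete.ml.torch.compile import compile_brevitas_qat_model

class TinyQATModel(torch.nn.Module):
    """Toy QAT network: the Brevitas layers carry the quantizers."""

    def __init__(self, n_bits=3):
        super().__init__()
        self.quant_input = qnn.QuantIdentity(bit_width=n_bits, return_quant_tensor=True)
        self.fc = qnn.QuantLinear(4, 2, bias=True, weight_bit_width=n_bits)

    def forward(self, x):
        return self.fc(self.quant_input(x))

inputset = np.random.uniform(-1, 1, size=(100, 4)).astype(np.float32)

# The bit-widths are read from the Brevitas layers during import.
quantized_module = compile_brevitas_qat_model(TinyQATModel(), inputset)
```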
______________________________________________________________________ - + ## class `QuantizedTranspose` @@ -1454,7 +1550,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `can_fuse` @@ -1472,7 +1568,7 @@ Transpose can not be fused since it must be performed over integer tensors as it ______________________________________________________________________ - + ### method `q_impl` @@ -1496,7 +1592,7 @@ Transpose the input integer encrypted tensor. ______________________________________________________________________ - + ## class `QuantizedFloor` @@ -1514,7 +1610,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ## class `QuantizedMax` @@ -1532,7 +1628,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ## class `QuantizedMin` @@ -1550,7 +1646,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ## class `QuantizedNeg` @@ -1568,7 +1664,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ## class `QuantizedSign` @@ -1586,7 +1682,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ## class `QuantizedUnsqueeze` @@ -1604,7 +1700,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `can_fuse` @@ -1622,7 +1718,7 @@ Unsqueeze can not be fused since it must be performed over integer tensors as it ______________________________________________________________________ - + ### method `q_impl` @@ -1646,7 +1742,7 @@ Unsqueeze the input tensors on a given axis. ______________________________________________________________________ - + ## class `QuantizedConcat` @@ -1664,7 +1760,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `can_fuse` @@ -1682,7 +1778,7 @@ Concatenation can not be fused since it must be performed over integer tensors a ______________________________________________________________________ - + ### method `q_impl` @@ -1706,7 +1802,7 @@ Concatenate the input tensors on a given axis. ______________________________________________________________________ - + ## class `QuantizedSqueeze` @@ -1724,7 +1820,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `can_fuse` @@ -1742,7 +1838,7 @@ Squeeze can not be fused since it must be performed over integer tensors as it r ______________________________________________________________________ - + ### method `q_impl` @@ -1766,7 +1862,7 @@ Squeeze the input tensors on a given axis. ______________________________________________________________________ - + ## class `ONNXShape` @@ -1784,7 +1880,7 @@ Get the names of encrypted integer tensors that are used by this op. 
______________________________________________________________________ - + ### method `can_fuse` @@ -1802,7 +1898,7 @@ This operation returns the shape of the tensor and thus can not be fused into a ______________________________________________________________________ - + ### method `q_impl` @@ -1815,7 +1911,7 @@ q_impl( ______________________________________________________________________ - + ## class `ONNXConstantOfShape` @@ -1833,7 +1929,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `can_fuse` @@ -1851,7 +1947,7 @@ This operation returns a new encrypted tensor and thus can not be fused. ______________________________________________________________________ - + ## class `ONNXGather` @@ -1871,7 +1967,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `can_fuse` @@ -1889,7 +1985,7 @@ This operation returns values from a tensor and thus can not be fused into a uni ______________________________________________________________________ - + ### method `q_impl` @@ -1902,7 +1998,7 @@ q_impl( ______________________________________________________________________ - + ## class `ONNXSlice` @@ -1920,7 +2016,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `can_fuse` @@ -1938,7 +2034,7 @@ This operation returns values from a tensor and thus can not be fused into a uni ______________________________________________________________________ - + ### method `q_impl` @@ -1951,7 +2047,7 @@ q_impl( ______________________________________________________________________ - + ## class `QuantizedExpand` @@ -1969,7 +2065,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `can_fuse` @@ -1987,7 +2083,7 @@ Unsqueeze can not be fused since it must be performed over integer tensors as it ______________________________________________________________________ - + ### method `q_impl` @@ -2011,7 +2107,7 @@ Expand the input tensor to a specified shape. ______________________________________________________________________ - + ## class `QuantizedEqual` @@ -2019,7 +2115,7 @@ Comparison operator ==. Only supports comparison with a constant. - + ### method `__init__` @@ -2046,13 +2142,13 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ## class `QuantizedUnfold` Quantized Unfold op. - + ### method `__init__` @@ -2079,7 +2175,7 @@ Get the names of encrypted integer tensors that are used by this op. ______________________________________________________________________ - + ### method `q_impl` diff --git a/docs/references/api/concrete.ml.quantization.quantizers.md b/docs/references/api/concrete.ml.quantization.quantizers.md index 8b259c098..d6d6a6abc 100644 --- a/docs/references/api/concrete.ml.quantization.quantizers.md +++ b/docs/references/api/concrete.ml.quantization.quantizers.md @@ -73,7 +73,7 @@ Get a copy of the quantization parameters. ______________________________________________________________________ - + ### method `copy_opts` @@ -89,7 +89,7 @@ Copy the options from a different structure. 
______________________________________________________________________ - + ### method `dump` @@ -105,7 +105,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -121,7 +121,7 @@ Dump itself to a dict. ______________________________________________________________________ - + ### method `dumps` @@ -137,7 +137,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `is_equal` @@ -158,7 +158,7 @@ Compare two quantization options sets. ______________________________________________________________________ - + ### method `load_dict` @@ -178,7 +178,7 @@ Load itself from a string. ______________________________________________________________________ - + ## class `MinMaxQuantizationStats` @@ -186,7 +186,7 @@ Calibration set statistics. This class stores the statistics for the calibration set or for a calibration data batch. Currently we only store min/max to determine the quantization range. The min/max are computed from the calibration set. - + ### method `__init__` @@ -210,7 +210,7 @@ Get a copy of the calibration set statistics. ______________________________________________________________________ - + ### method `check_is_uniform_quantized` @@ -232,7 +232,7 @@ Determines whether the values represented by this QuantizedArray show a quantize ______________________________________________________________________ - + ### method `compute_quantization_stats` @@ -248,7 +248,7 @@ Compute the calibration set quantization statistics. ______________________________________________________________________ - + ### method `copy_stats` @@ -264,7 +264,7 @@ Copy the statistics from a different structure. ______________________________________________________________________ - + ### method `dump` @@ -280,7 +280,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -296,7 +296,7 @@ Dump itself to a dict. ______________________________________________________________________ - + ### method `dumps` @@ -312,7 +312,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `load_dict` @@ -332,7 +332,7 @@ Load itself from a string. ______________________________________________________________________ - + ## class `UniformQuantizationParameters` @@ -340,7 +340,7 @@ Quantization parameters for uniform quantization. This class stores the parameters used for quantizing real values to discrete integer values. The parameters are computed from quantization options and quantization statistics. - + ### method `__init__` @@ -364,7 +364,7 @@ Get a copy of the quantization parameters. ______________________________________________________________________ - + ### method `compute_quantization_parameters` @@ -384,7 +384,7 @@ Compute the quantization parameters. ______________________________________________________________________ - + ### method `copy_params` @@ -400,7 +400,7 @@ Copy the parameters from a different structure. ______________________________________________________________________ - + ### method `dump` @@ -416,7 +416,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -432,7 +432,7 @@ Dump itself to a dict. ______________________________________________________________________ - + ### method `dumps` @@ -448,7 +448,7 @@ Dump itself to a string. 
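To make the relationship between options, statistics and parameters concrete, here is a minimal sketch. It assumes these classes can be instantiated without arguments and that `compute_quantization_parameters` takes the options and statistics in that order, as suggested by this page; the attribute names `scale` and `zero_point` follow the descriptions above.

```python
import numpy
from concrete.ml.quantization.quantizers import (
    MinMaxQuantizationStats,
    QuantizationOptions,
    UniformQuantizationParameters,
)

# Calibration data from which min/max statistics are computed
calibration_data = numpy.linspace(-1.0, 1.0, num=100)

options = QuantizationOptions(n_bits=8)

stats = MinMaxQuantizationStats()
stats.compute_quantization_stats(calibration_data)

params = UniformQuantizationParameters()
params.compute_quantization_parameters(options, stats)

print(params.scale, params.zero_point)
```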
______________________________________________________________________ - + ### method `load_dict` @@ -468,7 +468,7 @@ Load itself from a string. ______________________________________________________________________ - + ## class `UniformQuantizer` @@ -482,7 +482,7 @@ Contains all information necessary for uniform quantization and provides quantiz - `stats` (Optional\[MinMaxQuantizationStats\]): Quantization batch statistics set - `params` (Optional\[UniformQuantizationParameters\]): Quantization parameters set (scale, zero-point) - + ### method `__init__` @@ -527,7 +527,7 @@ Get a copy of the calibration set statistics. ______________________________________________________________________ - + ### method `check_is_uniform_quantized` @@ -549,7 +549,7 @@ Determines whether the values represented by this QuantizedArray show a quantize ______________________________________________________________________ - + ### method `compute_quantization_parameters` @@ -569,7 +569,7 @@ Compute the quantization parameters. ______________________________________________________________________ - + ### method `compute_quantization_stats` @@ -585,7 +585,7 @@ Compute the calibration set quantization statistics. ______________________________________________________________________ - + ### method `copy_opts` @@ -601,7 +601,7 @@ Copy the options from a different structure. ______________________________________________________________________ - + ### method `copy_params` @@ -617,7 +617,7 @@ Copy the parameters from a different structure. ______________________________________________________________________ - + ### method `copy_stats` @@ -633,7 +633,7 @@ Copy the statistics from a different structure. ______________________________________________________________________ - + ### method `dequant` @@ -653,7 +653,7 @@ De-quantize values. ______________________________________________________________________ - + ### method `dump` @@ -669,7 +669,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -685,7 +685,7 @@ Dump itself to a dict. ______________________________________________________________________ - + ### method `dumps` @@ -701,7 +701,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `is_equal` @@ -722,7 +722,7 @@ Compare two quantization options sets. ______________________________________________________________________ - + ### method `load_dict` @@ -742,7 +742,7 @@ Load itself from a string. ______________________________________________________________________ - + ### method `quant` @@ -762,7 +762,7 @@ Quantize values. ______________________________________________________________________ - + ## class `QuantizedArray` @@ -782,7 +782,7 @@ See https://arxiv.org/abs/1712.05877. - `params` (Optional\[UniformQuantizationParameters\]): Quantization parameters set (scale, zero-point) - `kwargs`: Any member of the options, stats, params sets as a key-value pair. The parameter sets need to be completely parametrized if their members appear in kwargs. - + ### method `__init__` @@ -800,7 +800,7 @@ __init__( ______________________________________________________________________ - + ### method `dequant` @@ -816,7 +816,7 @@ De-quantize self.qvalues. ______________________________________________________________________ - + ### method `dump` @@ -832,7 +832,7 @@ Dump itself to a file. 
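A short example of the typical `QuantizedArray` round trip (quantize on construction, read back the integer representation, de-quantize), following the `n_bits`/`values` arguments described above:

```python
import numpy
from concrete.ml.quantization import QuantizedArray

values = numpy.array([-1.5, 0.0, 0.42, 3.3])

# Float values are quantized to 8-bit integers on construction
q_arr = QuantizedArray(8, values)

print(q_arr.qvalues)    # integer representation
print(q_arr.dequant())  # approximate float reconstruction
```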
______________________________________________________________________ - + ### method `dump_dict` @@ -848,7 +848,7 @@ Dump itself to a dict. ______________________________________________________________________ - + ### method `dumps` @@ -864,7 +864,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `load_dict` @@ -884,7 +884,7 @@ Load itself from a string. ______________________________________________________________________ - + ### method `quant` @@ -900,7 +900,7 @@ Quantize self.values. ______________________________________________________________________ - + ### method `update_quantized_values` @@ -922,7 +922,7 @@ Update qvalues to get their corresponding values using the related quantized par ______________________________________________________________________ - + ### method `update_values` diff --git a/docs/references/api/concrete.ml.sklearn.base.md b/docs/references/api/concrete.ml.sklearn.base.md index e98cd234c..30e8c2252 100644 --- a/docs/references/api/concrete.ml.sklearn.base.md +++ b/docs/references/api/concrete.ml.sklearn.base.md @@ -14,7 +14,7 @@ Base classes for all estimators. ______________________________________________________________________ - + ## class `BaseEstimator` @@ -26,7 +26,7 @@ This class does not inherit from sklearn.base.BaseEstimator as it creates some c - `_is_a_public_cml_model` (bool): Private attribute indicating if the class is a public model (as opposed to base or mixin classes). - + ### method `__init__` @@ -84,7 +84,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -100,7 +100,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -116,7 +116,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -128,7 +128,8 @@ compile( show_mlir: 'bool' = False, p_error: 'Optional[float]' = None, global_p_error: 'Optional[float]' = None, - verbose: 'bool' = False + verbose: 'bool' = False, + device: 'str' = 'cpu' ) → Circuit ``` @@ -143,6 +144,7 @@ Compile the model. - `p_error` (Optional\[float\]): Probability of error of a single PBS. A p_error value cannot be given if a global_p_error value is already set. Default to None, which sets this error to a default value. - `global_p_error` (Optional\[float\]): Probability of error of the full circuit. A global_p_error value cannot be given if a p_error value is already set. This feature is not supported during the FHE simulation mode, meaning the probability is currently set to 0. Default to None, which sets this error to a default value. - `verbose` (bool): Indicate if compilation information should be printed during compilation. Default to False. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -150,7 +152,7 @@ Compile the model. ______________________________________________________________________ - + ### method `dequantize_output` @@ -172,7 +174,7 @@ This step ensures that the fit method has been called. ______________________________________________________________________ - + ### method `dump` @@ -188,7 +190,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -204,7 +206,7 @@ Dump the object as a dict. 
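The `compile` method documented above now exposes a `device` argument. A minimal end-to-end sketch with a built-in estimator; the GPU path assumes a CUDA-enabled Concrete runtime is available.

```python
from sklearn.datasets import make_classification
from concrete.ml.sklearn import LogisticRegression

X, y = make_classification(n_samples=100, n_features=10, random_state=0)

model = LogisticRegression(n_bits=8)
model.fit(X, y)

# device="cpu" is the default; pass device="cuda" to target the GPU backend
circuit = model.compile(X, device="cpu")
```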
______________________________________________________________________ - + ### method `dumps` @@ -220,7 +222,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -243,7 +245,7 @@ The fitted estimator. ______________________________________________________________________ - + ### method `fit_benchmark` @@ -270,7 +272,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -292,7 +294,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### classmethod `load_dict` @@ -312,7 +314,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `post_processing` @@ -336,7 +338,7 @@ For some simple models such a linear regression, there is no post-processing ste ______________________________________________________________________ - + ### method `predict` @@ -360,7 +362,7 @@ Predict values for X, in FHE or in the clear. ______________________________________________________________________ - + ### method `quantize_input` @@ -382,7 +384,7 @@ This step ensures that the fit method has been called. ______________________________________________________________________ - + ## class `BaseClassifier` @@ -390,7 +392,7 @@ Base class for linear and tree-based classifiers in Concrete ML. This class inherits from BaseEstimator and modifies some of its methods in order to align them with classifier behaviors. This notably include applying a sigmoid/softmax post-processing to the predicted values as well as handling a mapping of classes in case they are not ordered. - + ### method `__init__` @@ -472,7 +474,7 @@ Using this attribute is deprecated. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -488,7 +490,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -504,7 +506,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -516,7 +518,8 @@ compile( show_mlir: 'bool' = False, p_error: 'Optional[float]' = None, global_p_error: 'Optional[float]' = None, - verbose: 'bool' = False + verbose: 'bool' = False, + device: 'str' = 'cpu' ) → Circuit ``` @@ -531,6 +534,7 @@ Compile the model. - `p_error` (Optional\[float\]): Probability of error of a single PBS. A p_error value cannot be given if a global_p_error value is already set. Default to None, which sets this error to a default value. - `global_p_error` (Optional\[float\]): Probability of error of the full circuit. A global_p_error value cannot be given if a p_error value is already set. This feature is not supported during the FHE simulation mode, meaning the probability is currently set to 0. Default to None, which sets this error to a default value. - `verbose` (bool): Indicate if compilation information should be printed during compilation. Default to False. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -538,7 +542,7 @@ Compile the model. ______________________________________________________________________ - + ### method `dequantize_output` @@ -560,7 +564,7 @@ This step ensures that the fit method has been called. 
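Building on the compile example above, the `fhe` argument of `predict` selects how inference runs: in the clear on quantized values, in FHE simulation, or in actual FHE execution.

```python
# Clear (quantized) inference, useful for fast checks
y_clear = model.predict(X, fhe="disable")

# FHE simulation: same quantized computation, without the encryption cost
y_sim = model.predict(X, fhe="simulate")

# Actual FHE execution on encrypted data (much slower, run on a few samples)
y_fhe = model.predict(X[:3], fhe="execute")
```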
______________________________________________________________________ - + ### method `dump` @@ -576,7 +580,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -592,7 +596,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -608,7 +612,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -618,7 +622,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -645,7 +649,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -667,7 +671,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### classmethod `load_dict` @@ -687,7 +691,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `post_processing` @@ -697,7 +701,7 @@ post_processing(y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `predict` @@ -710,7 +714,7 @@ predict( ______________________________________________________________________ - + ### method `predict_proba` @@ -734,7 +738,7 @@ Predict class probabilities. ______________________________________________________________________ - + ### method `quantize_input` @@ -756,13 +760,13 @@ This step ensures that the fit method has been called. ______________________________________________________________________ - + ## class `QuantizedTorchEstimatorMixin` Mixin that provides quantization for a torch module and follows the Estimator API. - + ### method `__init__` @@ -838,7 +842,7 @@ Get the output quantizers. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -854,7 +858,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -870,7 +874,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -882,13 +886,14 @@ compile( show_mlir: 'bool' = False, p_error: 'Optional[float]' = None, global_p_error: 'Optional[float]' = None, - verbose: 'bool' = False + verbose: 'bool' = False, + device: 'str' = 'cpu' ) → Circuit ``` ______________________________________________________________________ - + ### method `dequantize_output` @@ -898,7 +903,7 @@ dequantize_output(*q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -914,7 +919,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -930,7 +935,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -946,7 +951,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -971,7 +976,7 @@ The fitted estimator. 
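`QuantizedTorchEstimatorMixin` is the base of the built-in, skorch-style neural networks such as `NeuralNetClassifier`. A hedged sketch follows; the `module__*` hyper-parameter name is an assumption about the skorch-style interface.

```python
import numpy
from sklearn.datasets import make_classification
from concrete.ml.sklearn import NeuralNetClassifier

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
X = X.astype(numpy.float32)  # torch modules expect float32 inputs

model = NeuralNetClassifier(
    module__n_layers=2,  # assumed skorch-style module hyper-parameter
    max_epochs=10,
)
model.fit(X, y)
model.compile(X)

y_sim = model.predict(X, fhe="simulate")
```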
______________________________________________________________________ - + ### method `fit_benchmark` @@ -1002,7 +1007,7 @@ The Concrete ML and equivalent skorch fitted estimators. ______________________________________________________________________ - + ### method `get_params` @@ -1024,7 +1029,7 @@ This method is overloaded in order to make sure that auto-computed parameters ar ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -1034,7 +1039,7 @@ get_sklearn_params(deep: 'bool' = True) → Dict ______________________________________________________________________ - + ### classmethod `load_dict` @@ -1054,7 +1059,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `post_processing` @@ -1064,7 +1069,7 @@ post_processing(y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `predict` @@ -1088,7 +1093,7 @@ Predict values for X, in FHE or in the clear. ______________________________________________________________________ - + ### method `prune` @@ -1116,7 +1121,7 @@ A new pruned copy of the Neural Network model. ______________________________________________________________________ - + ### method `quantize_input` @@ -1126,7 +1131,7 @@ quantize_input(X: 'ndarray') → ndarray ______________________________________________________________________ - + ## class `BaseTreeEstimatorMixin` @@ -1134,7 +1139,7 @@ Mixin class for tree-based estimators. This class inherits from sklearn.base.BaseEstimator in order to have access to scikit-learn's `get_params` and `set_params` methods. - + ### method `__init__` @@ -1196,7 +1201,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -1212,7 +1217,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -1228,7 +1233,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -1238,7 +1243,7 @@ compile(*args, **kwargs) → Circuit ______________________________________________________________________ - + ### method `dequantize_output` @@ -1248,7 +1253,7 @@ dequantize_output(q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -1264,7 +1269,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -1280,7 +1285,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -1296,7 +1301,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -1306,7 +1311,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -1333,7 +1338,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### classmethod `from_sklearn_model` @@ -1360,7 +1365,7 @@ The FHE-compliant fitted model. 
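The `from_sklearn_model` classmethod above converts an already-fitted scikit-learn model into its FHE-compliant counterpart. A sketch for a tree classifier; the exact keyword names, such as `n_bits`, are assumptions based on this page.

```python
from sklearn.datasets import make_classification
from sklearn.tree import DecisionTreeClassifier as SklearnDecisionTreeClassifier
from concrete.ml.sklearn import DecisionTreeClassifier

X, y = make_classification(n_samples=100, n_features=10, random_state=0)

# Train a regular (float) scikit-learn tree first
sk_model = SklearnDecisionTreeClassifier(max_depth=4).fit(X, y)

# Convert it to an FHE-compliant Concrete ML model, then compile
fhe_model = DecisionTreeClassifier.from_sklearn_model(sk_model, X, n_bits=8)
fhe_model.compile(X)
```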
______________________________________________________________________ - + ### method `get_sklearn_params` @@ -1382,7 +1387,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### classmethod `load_dict` @@ -1402,7 +1407,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `post_processing` @@ -1412,7 +1417,7 @@ post_processing(y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `predict` @@ -1425,7 +1430,7 @@ predict( ______________________________________________________________________ - + ### method `quantize_input` @@ -1435,7 +1440,7 @@ quantize_input(X: 'ndarray') → ndarray ______________________________________________________________________ - + ## class `BaseTreeRegressorMixin` @@ -1443,7 +1448,7 @@ Mixin class for tree-based regressors. This class is used to create a tree-based regressor class that inherits from sklearn.base.RegressorMixin, which essentially gives access to scikit-learn's `score` method for regressors. - + ### method `__init__` @@ -1505,7 +1510,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -1521,7 +1526,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -1537,7 +1542,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -1547,7 +1552,7 @@ compile(*args, **kwargs) → Circuit ______________________________________________________________________ - + ### method `dequantize_output` @@ -1557,7 +1562,7 @@ dequantize_output(q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -1573,7 +1578,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -1589,7 +1594,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -1605,7 +1610,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -1615,7 +1620,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -1642,7 +1647,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### classmethod `from_sklearn_model` @@ -1669,7 +1674,7 @@ The FHE-compliant fitted model. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -1691,7 +1696,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### classmethod `load_dict` @@ -1711,7 +1716,7 @@ Load itself from a dict. 
______________________________________________________________________ - + ### method `post_processing` @@ -1721,7 +1726,7 @@ post_processing(y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `predict` @@ -1734,7 +1739,7 @@ predict( ______________________________________________________________________ - + ### method `quantize_input` @@ -1744,7 +1749,7 @@ quantize_input(X: 'ndarray') → ndarray ______________________________________________________________________ - + ## class `BaseTreeClassifierMixin` @@ -1754,7 +1759,7 @@ This class is used to create a tree-based classifier class that inherits from sk Additionally, this class adjusts some of the tree-based base class's methods in order to make them compliant with classification workflows. - + ### method `__init__` @@ -1840,7 +1845,7 @@ Using this attribute is deprecated. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -1856,7 +1861,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -1872,7 +1877,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -1882,7 +1887,7 @@ compile(*args, **kwargs) → Circuit ______________________________________________________________________ - + ### method `dequantize_output` @@ -1892,7 +1897,7 @@ dequantize_output(q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -1908,7 +1913,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -1924,7 +1929,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -1940,7 +1945,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -1950,7 +1955,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -1977,7 +1982,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### classmethod `from_sklearn_model` @@ -2004,7 +2009,7 @@ The FHE-compliant fitted model. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -2026,7 +2031,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### classmethod `load_dict` @@ -2046,7 +2051,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `post_processing` @@ -2056,7 +2061,7 @@ post_processing(y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `predict` @@ -2069,7 +2074,7 @@ predict( ______________________________________________________________________ - + ### method `predict_proba` @@ -2093,7 +2098,7 @@ Predict class probabilities. 
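`predict_proba` follows the same `fhe` convention as `predict`. For instance, with the tree classifier converted in the `from_sklearn_model` sketch earlier (and already compiled):

```python
# Class probabilities in simulation, then hard predictions
probabilities = fhe_model.predict_proba(X, fhe="simulate")
y_pred = probabilities.argmax(axis=1)
```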
______________________________________________________________________ - + ### method `quantize_input` @@ -2103,7 +2108,7 @@ quantize_input(X: 'ndarray') → ndarray ______________________________________________________________________ - + ## class `SklearnLinearModelMixin` @@ -2111,7 +2116,7 @@ A Mixin class for sklearn linear models with FHE. This class inherits from sklearn.base.BaseEstimator in order to have access to scikit-learn's `get_params` and `set_params` methods. - + ### method `__init__` @@ -2173,7 +2178,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -2189,7 +2194,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -2205,7 +2210,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -2217,7 +2222,8 @@ compile( show_mlir: 'bool' = False, p_error: 'Optional[float]' = None, global_p_error: 'Optional[float]' = None, - verbose: 'bool' = False + verbose: 'bool' = False, + device: 'str' = 'cpu' ) → Circuit ``` @@ -2232,6 +2238,7 @@ Compile the model. - `p_error` (Optional\[float\]): Probability of error of a single PBS. A p_error value cannot be given if a global_p_error value is already set. Default to None, which sets this error to a default value. - `global_p_error` (Optional\[float\]): Probability of error of the full circuit. A global_p_error value cannot be given if a p_error value is already set. This feature is not supported during the FHE simulation mode, meaning the probability is currently set to 0. Default to None, which sets this error to a default value. - `verbose` (bool): Indicate if compilation information should be printed during compilation. Default to False. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -2239,7 +2246,7 @@ Compile the model. ______________________________________________________________________ - + ### method `dequantize_output` @@ -2249,7 +2256,7 @@ dequantize_output(q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -2265,7 +2272,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -2281,7 +2288,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -2297,7 +2304,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -2307,7 +2314,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -2334,7 +2341,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### classmethod `from_sklearn_model` @@ -2361,7 +2368,7 @@ The FHE-compliant fitted model. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -2383,7 +2390,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### classmethod `load_dict` @@ -2403,7 +2410,7 @@ Load itself from a dict. 
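The `dumps`/`load_dict` pair above supports serializing fitted models. In practice the JSON string is rebuilt into an estimator with the `loads` helper, assumed here to live in `concrete.ml.common.serialization.loaders`; `model` stands for any fitted estimator, such as the `LogisticRegression` from the compile example earlier.

```python
from concrete.ml.common.serialization.loaders import loads

serialized = model.dumps()    # JSON string produced by the fitted estimator
restored = loads(serialized)  # equivalent estimator, ready to compile again
```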
______________________________________________________________________ - + ### method `post_processing` @@ -2427,7 +2434,7 @@ For some simple models such a linear regression, there is no post-processing ste ______________________________________________________________________ - + ### method `predict` @@ -2451,7 +2458,7 @@ Predict values for X, in FHE or in the clear. ______________________________________________________________________ - + ### method `quantize_input` @@ -2461,7 +2468,7 @@ quantize_input(X: 'ndarray') → ndarray ______________________________________________________________________ - + ## class `SklearnLinearRegressorMixin` @@ -2469,7 +2476,7 @@ A Mixin class for sklearn linear regressors with FHE. This class is used to create a linear regressor class that inherits from sklearn.base.RegressorMixin, which essentially gives access to scikit-learn's `score` method for regressors. - + ### method `__init__` @@ -2531,7 +2538,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -2547,7 +2554,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -2563,7 +2570,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -2575,7 +2582,8 @@ compile( show_mlir: 'bool' = False, p_error: 'Optional[float]' = None, global_p_error: 'Optional[float]' = None, - verbose: 'bool' = False + verbose: 'bool' = False, + device: 'str' = 'cpu' ) → Circuit ``` @@ -2590,6 +2598,7 @@ Compile the model. - `p_error` (Optional\[float\]): Probability of error of a single PBS. A p_error value cannot be given if a global_p_error value is already set. Default to None, which sets this error to a default value. - `global_p_error` (Optional\[float\]): Probability of error of the full circuit. A global_p_error value cannot be given if a p_error value is already set. This feature is not supported during the FHE simulation mode, meaning the probability is currently set to 0. Default to None, which sets this error to a default value. - `verbose` (bool): Indicate if compilation information should be printed during compilation. Default to False. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -2597,7 +2606,7 @@ Compile the model. ______________________________________________________________________ - + ### method `dequantize_output` @@ -2607,7 +2616,7 @@ dequantize_output(q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -2623,7 +2632,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -2639,7 +2648,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -2655,7 +2664,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -2665,7 +2674,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -2692,7 +2701,7 @@ The Concrete ML and float equivalent fitted estimators. 
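`fit_benchmark` returns both the Concrete ML estimator and its float scikit-learn equivalent fitted on the same data, which makes quantization-accuracy comparisons straightforward:

```python
from sklearn.datasets import make_regression
from concrete.ml.sklearn import LinearRegression

X, y = make_regression(n_samples=100, n_features=10, noise=1.0, random_state=0)

# Returns (quantized Concrete ML model, float scikit-learn model)
cml_model, sk_model = LinearRegression(n_bits=8).fit_benchmark(X, y)

print(cml_model.score(X, y), sk_model.score(X, y))
```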
______________________________________________________________________ - + ### classmethod `from_sklearn_model` @@ -2719,7 +2728,7 @@ The FHE-compliant fitted model. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -2741,7 +2750,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### classmethod `load_dict` @@ -2761,7 +2770,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `post_processing` @@ -2785,7 +2794,7 @@ For some simple models such a linear regression, there is no post-processing ste ______________________________________________________________________ - + ### method `predict` @@ -2809,7 +2818,7 @@ Predict values for X, in FHE or in the clear. ______________________________________________________________________ - + ### method `quantize_input` @@ -2819,7 +2828,7 @@ quantize_input(X: 'ndarray') → ndarray ______________________________________________________________________ - + ## class `SklearnLinearClassifierMixin` @@ -2829,7 +2838,7 @@ This class is used to create a linear classifier class that inherits from sklear Additionally, this class adjusts some of the tree-based base class's methods in order to make them compliant with classification workflows. - + ### method `__init__` @@ -2915,7 +2924,7 @@ Using this attribute is deprecated. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -2931,7 +2940,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -2947,7 +2956,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -2959,7 +2968,8 @@ compile( show_mlir: 'bool' = False, p_error: 'Optional[float]' = None, global_p_error: 'Optional[float]' = None, - verbose: 'bool' = False + verbose: 'bool' = False, + device: 'str' = 'cpu' ) → Circuit ``` @@ -2974,6 +2984,7 @@ Compile the model. - `p_error` (Optional\[float\]): Probability of error of a single PBS. A p_error value cannot be given if a global_p_error value is already set. Default to None, which sets this error to a default value. - `global_p_error` (Optional\[float\]): Probability of error of the full circuit. A global_p_error value cannot be given if a p_error value is already set. This feature is not supported during the FHE simulation mode, meaning the probability is currently set to 0. Default to None, which sets this error to a default value. - `verbose` (bool): Indicate if compilation information should be printed during compilation. Default to False. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -2981,7 +2992,7 @@ Compile the model. ______________________________________________________________________ - + ### method `decision_function` @@ -3005,7 +3016,7 @@ Predict confidence scores. ______________________________________________________________________ - + ### method `dequantize_output` @@ -3015,7 +3026,7 @@ dequantize_output(q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -3031,7 +3042,7 @@ Dump itself to a file. 
______________________________________________________________________ - + ### method `dump_dict` @@ -3047,7 +3058,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -3063,7 +3074,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -3073,7 +3084,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -3100,7 +3111,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### classmethod `from_sklearn_model` @@ -3127,7 +3138,7 @@ The FHE-compliant fitted model. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -3149,7 +3160,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### classmethod `load_dict` @@ -3169,7 +3180,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `post_processing` @@ -3179,7 +3190,7 @@ post_processing(y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `predict` @@ -3192,7 +3203,7 @@ predict( ______________________________________________________________________ - + ### method `predict_proba` @@ -3205,7 +3216,7 @@ predict_proba( ______________________________________________________________________ - + ### method `quantize_input` @@ -3215,7 +3226,7 @@ quantize_input(X: 'ndarray') → ndarray ______________________________________________________________________ - + ## class `SklearnSGDRegressorMixin` @@ -3223,7 +3234,7 @@ A Mixin class for sklearn SGD regressors with FHE. This class is used to create a SGD regressor class what can be exported to ONNX using Hummingbird. - + ### method `__init__` @@ -3285,7 +3296,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -3301,7 +3312,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -3317,7 +3328,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -3329,7 +3340,8 @@ compile( show_mlir: 'bool' = False, p_error: 'Optional[float]' = None, global_p_error: 'Optional[float]' = None, - verbose: 'bool' = False + verbose: 'bool' = False, + device: 'str' = 'cpu' ) → Circuit ``` @@ -3344,6 +3356,7 @@ Compile the model. - `p_error` (Optional\[float\]): Probability of error of a single PBS. A p_error value cannot be given if a global_p_error value is already set. Default to None, which sets this error to a default value. - `global_p_error` (Optional\[float\]): Probability of error of the full circuit. A global_p_error value cannot be given if a p_error value is already set. This feature is not supported during the FHE simulation mode, meaning the probability is currently set to 0. Default to None, which sets this error to a default value. - `verbose` (bool): Indicate if compilation information should be printed during compilation. Default to False. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. 
**Returns:** @@ -3351,7 +3364,7 @@ Compile the model. ______________________________________________________________________ - + ### method `dequantize_output` @@ -3361,7 +3374,7 @@ dequantize_output(q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -3377,7 +3390,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -3393,7 +3406,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -3409,7 +3422,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -3419,7 +3432,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -3446,7 +3459,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### classmethod `from_sklearn_model` @@ -3473,7 +3486,7 @@ The FHE-compliant fitted model. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -3495,7 +3508,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### classmethod `load_dict` @@ -3515,7 +3528,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `post_processing` @@ -3539,7 +3552,7 @@ For some simple models such a linear regression, there is no post-processing ste ______________________________________________________________________ - + ### method `predict` @@ -3563,7 +3576,7 @@ Predict values for X, in FHE or in the clear. ______________________________________________________________________ - + ### method `quantize_input` @@ -3573,7 +3586,7 @@ quantize_input(X: 'ndarray') → ndarray ______________________________________________________________________ - + ## class `SklearnSGDClassifierMixin` @@ -3581,7 +3594,7 @@ A Mixin class for sklearn SGD classifiers with FHE. This class is used to create a SGD classifier class what can be exported to ONNX using Hummingbird. - + ### method `__init__` @@ -3667,7 +3680,7 @@ Using this attribute is deprecated. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -3683,7 +3696,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -3699,7 +3712,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -3711,7 +3724,8 @@ compile( show_mlir: 'bool' = False, p_error: 'Optional[float]' = None, global_p_error: 'Optional[float]' = None, - verbose: 'bool' = False + verbose: 'bool' = False, + device: 'str' = 'cpu' ) → Circuit ``` @@ -3726,6 +3740,7 @@ Compile the model. - `p_error` (Optional\[float\]): Probability of error of a single PBS. A p_error value cannot be given if a global_p_error value is already set. Default to None, which sets this error to a default value. - `global_p_error` (Optional\[float\]): Probability of error of the full circuit. A global_p_error value cannot be given if a p_error value is already set. 
This feature is not supported during the FHE simulation mode, meaning the probability is currently set to 0. Default to None, which sets this error to a default value. - `verbose` (bool): Indicate if compilation information should be printed during compilation. Default to False. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -3733,7 +3748,7 @@ Compile the model. ______________________________________________________________________ - + ### method `decision_function` @@ -3757,7 +3772,7 @@ Predict confidence scores. ______________________________________________________________________ - + ### method `dequantize_output` @@ -3767,7 +3782,7 @@ dequantize_output(q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -3783,7 +3798,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -3799,7 +3814,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -3815,7 +3830,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -3825,7 +3840,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -3852,7 +3867,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### classmethod `from_sklearn_model` @@ -3879,7 +3894,7 @@ The FHE-compliant fitted model. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -3901,7 +3916,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### classmethod `load_dict` @@ -3921,7 +3936,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `post_processing` @@ -3931,7 +3946,7 @@ post_processing(y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `predict` @@ -3944,7 +3959,7 @@ predict( ______________________________________________________________________ - + ### method `predict_proba` @@ -3957,7 +3972,7 @@ predict_proba( ______________________________________________________________________ - + ### method `quantize_input` @@ -3967,7 +3982,7 @@ quantize_input(X: 'ndarray') → ndarray ______________________________________________________________________ - + ## class `SklearnKNeighborsMixin` @@ -3975,7 +3990,7 @@ A Mixin class for sklearn KNeighbors models with FHE. This class inherits from sklearn.base.BaseEstimator in order to have access to scikit-learn's `get_params` and `set_params` methods. - + ### method `__init__` @@ -4035,7 +4050,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -4051,7 +4066,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -4067,7 +4082,7 @@ Check if the model is fitted. 
______________________________________________________________________ - + ### method `compile` @@ -4079,7 +4094,8 @@ compile( show_mlir: 'bool' = False, p_error: 'Optional[float]' = None, global_p_error: 'Optional[float]' = None, - verbose: 'bool' = False + verbose: 'bool' = False, + device: 'str' = 'cpu' ) → Circuit ``` @@ -4094,6 +4110,7 @@ Compile the model. - `p_error` (Optional\[float\]): Probability of error of a single PBS. A p_error value cannot be given if a global_p_error value is already set. Default to None, which sets this error to a default value. - `global_p_error` (Optional\[float\]): Probability of error of the full circuit. A global_p_error value cannot be given if a p_error value is already set. This feature is not supported during the FHE simulation mode, meaning the probability is currently set to 0. Default to None, which sets this error to a default value. - `verbose` (bool): Indicate if compilation information should be printed during compilation. Default to False. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -4101,7 +4118,7 @@ Compile the model. ______________________________________________________________________ - + ### method `dequantize_output` @@ -4111,7 +4128,7 @@ dequantize_output(q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -4127,7 +4144,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -4143,7 +4160,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -4159,7 +4176,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -4169,7 +4186,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -4196,7 +4213,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -4218,7 +4235,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### method `get_topk_labels` @@ -4242,7 +4259,7 @@ Return the K-nearest labels of each point. ______________________________________________________________________ - + ### classmethod `load_dict` @@ -4262,7 +4279,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `majority_vote` @@ -4282,7 +4299,7 @@ Determine the most common class among nearest neighborsfor each query. ______________________________________________________________________ - + ### method `post_processing` @@ -4304,7 +4321,7 @@ For KNN, the de-quantization step is not required. Because \_inference returns t ______________________________________________________________________ - + ### method `predict` @@ -4317,7 +4334,7 @@ predict( ______________________________________________________________________ - + ### method `quantize_input` @@ -4327,7 +4344,7 @@ quantize_input(X: 'ndarray') → ndarray ______________________________________________________________________ - + ## class `SklearnKNeighborsClassifierMixin` @@ -4335,7 +4352,7 @@ A Mixin class for sklearn KNeighbors classifiers with FHE. 
This class is used to create a KNeighbors classifier class that inherits from SklearnKNeighborsMixin and sklearn.base.ClassifierMixin. By inheriting from sklearn.base.ClassifierMixin, it allows this class to be recognized as a classifier." - + ### method `__init__` @@ -4395,7 +4412,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `check_model_is_compiled` @@ -4411,7 +4428,7 @@ Check if the model is compiled. ______________________________________________________________________ - + ### method `check_model_is_fitted` @@ -4427,7 +4444,7 @@ Check if the model is fitted. ______________________________________________________________________ - + ### method `compile` @@ -4439,7 +4456,8 @@ compile( show_mlir: 'bool' = False, p_error: 'Optional[float]' = None, global_p_error: 'Optional[float]' = None, - verbose: 'bool' = False + verbose: 'bool' = False, + device: 'str' = 'cpu' ) → Circuit ``` @@ -4454,6 +4472,7 @@ Compile the model. - `p_error` (Optional\[float\]): Probability of error of a single PBS. A p_error value cannot be given if a global_p_error value is already set. Default to None, which sets this error to a default value. - `global_p_error` (Optional\[float\]): Probability of error of the full circuit. A global_p_error value cannot be given if a p_error value is already set. This feature is not supported during the FHE simulation mode, meaning the probability is currently set to 0. Default to None, which sets this error to a default value. - `verbose` (bool): Indicate if compilation information should be printed during compilation. Default to False. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -4461,7 +4480,7 @@ Compile the model. ______________________________________________________________________ - + ### method `dequantize_output` @@ -4471,7 +4490,7 @@ dequantize_output(q_y_preds: 'ndarray') → ndarray ______________________________________________________________________ - + ### method `dump` @@ -4487,7 +4506,7 @@ Dump itself to a file. ______________________________________________________________________ - + ### method `dump_dict` @@ -4503,7 +4522,7 @@ Dump the object as a dict. ______________________________________________________________________ - + ### method `dumps` @@ -4519,7 +4538,7 @@ Dump itself to a string. ______________________________________________________________________ - + ### method `fit` @@ -4529,7 +4548,7 @@ fit(X: 'Data', y: 'Target', **fit_parameters) ______________________________________________________________________ - + ### method `fit_benchmark` @@ -4556,7 +4575,7 @@ The Concrete ML and float equivalent fitted estimators. ______________________________________________________________________ - + ### method `get_sklearn_params` @@ -4578,7 +4597,7 @@ This method is used to instantiate a scikit-learn model using the Concrete ML mo ______________________________________________________________________ - + ### method `get_topk_labels` @@ -4602,7 +4621,7 @@ Return the K-nearest labels of each point. ______________________________________________________________________ - + ### classmethod `load_dict` @@ -4622,7 +4641,7 @@ Load itself from a dict. ______________________________________________________________________ - + ### method `majority_vote` @@ -4642,7 +4661,7 @@ Determine the most common class among nearest neighborsfor each query. 
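A short sketch for the FHE KNN classifier built on this mixin; a very low `n_bits` keeps the circuit tractable, and the `get_topk_labels` keyword usage is an assumption based on its description above.

```python
from sklearn.datasets import make_classification
from concrete.ml.sklearn import KNeighborsClassifier

X, y = make_classification(n_samples=30, n_features=4, random_state=0)

knn = KNeighborsClassifier(n_bits=2, n_neighbors=3)
knn.fit(X, y)
knn.compile(X)

y_sim = knn.predict(X[:5], fhe="simulate")

# Labels of the 3 nearest neighbors for each queried point
topk = knn.get_topk_labels(X[:5], fhe="simulate")
```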
______________________________________________________________________ - + ### method `post_processing` @@ -4664,7 +4683,7 @@ For KNN, the de-quantization step is not required. Because \_inference returns t ______________________________________________________________________ - + ### method `predict` @@ -4677,7 +4696,7 @@ predict( ______________________________________________________________________ - + ### method `quantize_input` diff --git a/docs/references/api/concrete.ml.sklearn.linear_model.md b/docs/references/api/concrete.ml.sklearn.linear_model.md index 986a2d4b5..69b9492aa 100644 --- a/docs/references/api/concrete.ml.sklearn.linear_model.md +++ b/docs/references/api/concrete.ml.sklearn.linear_model.md @@ -223,7 +223,7 @@ Using this attribute is deprecated. ______________________________________________________________________ - + ### method `dump_dict` @@ -233,7 +233,7 @@ dump_dict() → Dict[str, Any] ______________________________________________________________________ - + ### method `fit` @@ -244,7 +244,8 @@ fit( fhe: Optional[str, FheMode] = None, coef_init: Optional[ndarray] = None, intercept_init: Optional[ndarray] = None, - sample_weight: Optional[ndarray] = None + sample_weight: Optional[ndarray] = None, + device: str = 'cpu' ) ``` @@ -263,6 +264,7 @@ For more details on some of these arguments please refer to: https://scikit-lear - `coef_init` (Optional\[numpy.ndarray\]): The initial coefficients to warm-start the optimization. Default to None. - `intercept_init` (Optional\[numpy.ndarray\]): The initial intercept to warm-start the optimization. Default to None. - `sample_weight` (Optional\[numpy.ndarray\]): Weights applied to individual samples (1. for unweighted). It is currently not supported for FHE training. Default to None. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** The fitted estimator. @@ -284,7 +286,7 @@ get_sklearn_params(deep: bool = True) → dict ______________________________________________________________________ - + ### classmethod `load_dict` @@ -294,7 +296,7 @@ load_dict(metadata: Dict) ______________________________________________________________________ - + ### method `partial_fit` @@ -324,7 +326,7 @@ This function does one iteration of SGD training. Looping n_times over this func ______________________________________________________________________ - + ### method `post_processing` @@ -360,7 +362,7 @@ The justification for the formula in the loss="modified_huber" case is in the ap ______________________________________________________________________ - + ## class `SGDRegressor` @@ -374,7 +376,7 @@ An FHE linear regression model fitted with stochastic gradient descent. For more details on SGDRegressor please refer to the scikit-learn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html - + ### method `__init__` @@ -449,7 +451,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `dump_dict` @@ -459,7 +461,7 @@ dump_dict() → Dict[str, Any] ______________________________________________________________________ - + ### classmethod `load_dict` @@ -469,7 +471,7 @@ load_dict(metadata: Dict) ______________________________________________________________________ - + ## class `ElasticNet` @@ -483,7 +485,7 @@ An ElasticNet regression model with FHE. 
For more details on ElasticNet please refer to the scikit-learn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html - + ### method `__init__` @@ -551,7 +553,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `dump_dict` @@ -561,7 +563,7 @@ dump_dict() → Dict[str, Any] ______________________________________________________________________ - + ### classmethod `load_dict` @@ -571,7 +573,7 @@ load_dict(metadata: Dict) ______________________________________________________________________ - + ## class `Lasso` @@ -585,7 +587,7 @@ A Lasso regression model with FHE. For more details on Lasso please refer to the scikit-learn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html - + ### method `__init__` @@ -652,7 +654,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `dump_dict` @@ -662,7 +664,7 @@ dump_dict() → Dict[str, Any] ______________________________________________________________________ - + ### classmethod `load_dict` @@ -672,7 +674,7 @@ load_dict(metadata: Dict) ______________________________________________________________________ - + ## class `Ridge` @@ -686,7 +688,7 @@ A Ridge regression model with FHE. For more details on Ridge please refer to the scikit-learn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html - + ### method `__init__` @@ -751,7 +753,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `dump_dict` @@ -761,7 +763,7 @@ dump_dict() → Dict[str, Any] ______________________________________________________________________ - + ### classmethod `load_dict` @@ -771,7 +773,7 @@ load_dict(metadata: Dict) ______________________________________________________________________ - + ## class `LogisticRegression` @@ -785,7 +787,7 @@ A logistic regression model with FHE. For more details on LogisticRegression please refer to the scikit-learn documentation: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html - + ### method `__init__` @@ -880,7 +882,7 @@ Using this attribute is deprecated. ______________________________________________________________________ - + ### method `dump_dict` @@ -890,7 +892,7 @@ dump_dict() → Dict[str, Any] ______________________________________________________________________ - + ### classmethod `load_dict` diff --git a/docs/references/api/concrete.ml.sklearn.tree_to_numpy.md b/docs/references/api/concrete.ml.sklearn.tree_to_numpy.md index ceb7f576a..d560d73cd 100644 --- a/docs/references/api/concrete.ml.sklearn.tree_to_numpy.md +++ b/docs/references/api/concrete.ml.sklearn.tree_to_numpy.md @@ -141,7 +141,7 @@ Apply pre-processing onto the ONNX graph. ______________________________________________________________________ - + ## function `tree_values_preprocessing` @@ -168,7 +168,7 @@ Pre-process tree values. ______________________________________________________________________ - + ## function `tree_to_numpy` @@ -201,7 +201,7 @@ Convert the tree inference to a numpy functions using Hummingbird. 
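Stepping back from the conversion internals for a moment, the linear models documented earlier in this section (ElasticNet, Lasso, Ridge, LogisticRegression) all share the same fit / compile / predict pattern. The following hedged sketch uses LogisticRegression on illustrative synthetic data; the dataset and parameter values are assumptions.

```python
import numpy

from concrete.ml.sklearn import LogisticRegression

# Illustrative synthetic binary classification data
rng = numpy.random.RandomState(42)
X = rng.randn(100, 4)
y = (X[:, 0] + X[:, 1] > 0).astype(numpy.int64)

# Same pattern applies to ElasticNet, Lasso and Ridge (with regression targets)
model = LogisticRegression(n_bits=8)
model.fit(X, y)

# Compile to an FHE circuit using a representative input set
model.compile(X)

# Cleartext, simulated and encrypted inference
y_clear = model.predict(X, fhe="disable")
y_sim = model.predict(X, fhe="simulate")
y_fhe = model.predict(X[:5], fhe="execute")
```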
______________________________________________________________________ - + ## function `onnx_fp32_model_to_quantized_model` diff --git a/docs/references/api/concrete.ml.sklearn.xgb.md b/docs/references/api/concrete.ml.sklearn.xgb.md index 9723b0d85..739a8a52f 100644 --- a/docs/references/api/concrete.ml.sklearn.xgb.md +++ b/docs/references/api/concrete.ml.sklearn.xgb.md @@ -155,7 +155,7 @@ load_dict(metadata: Dict) ______________________________________________________________________ - + ## class `XGBRegressor` @@ -163,7 +163,7 @@ Implements the XGBoost regressor. See https://xgboost.readthedocs.io/en/stable/python/python_api.html#module-xgboost.sklearn for more information about the parameters used. - + ### method `__init__` @@ -257,7 +257,7 @@ Is None if the model is not fitted. ______________________________________________________________________ - + ### method `dump_dict` @@ -267,7 +267,7 @@ dump_dict() → Dict[str, Any] ______________________________________________________________________ - + ### method `fit` @@ -277,7 +277,7 @@ fit(X, y, *args, **kwargs) → Any ______________________________________________________________________ - + ### classmethod `load_dict` @@ -287,7 +287,7 @@ load_dict(metadata: Dict) ______________________________________________________________________ - + ### method `post_processing` diff --git a/docs/references/api/concrete.ml.torch.compile.md b/docs/references/api/concrete.ml.torch.compile.md index 41d1d19ef..1000099e7 100644 --- a/docs/references/api/concrete.ml.torch.compile.md +++ b/docs/references/api/concrete.ml.torch.compile.md @@ -91,7 +91,7 @@ Take a model in torch or ONNX, turn it to numpy, quantize its inputs / weights / ______________________________________________________________________ - + ## function `compile_torch_model` @@ -109,7 +109,8 @@ compile_torch_model( global_p_error: Optional[float] = None, verbose: bool = False, inputs_encryption_status: Optional[Sequence[str]] = None, - reduce_sum_copy: bool = False + reduce_sum_copy: bool = False, + device: str = 'cpu' ) → QuantizedModule ``` @@ -134,6 +135,7 @@ Take a model in torch, turn it to numpy, quantize its inputs / weights / outputs - `verbose` (bool): whether to show compilation information - `inputs_encryption_status` (Optional\[Sequence\[str\]\]): encryption status ('clear', 'encrypted') for each input. By default all arguments will be encrypted. - `reduce_sum_copy` (bool): if the inputs of QuantizedReduceSum should be copied to avoid bit-width propagation +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -141,7 +143,7 @@ Take a model in torch, turn it to numpy, quantize its inputs / weights / outputs ______________________________________________________________________ - + ## function `compile_onnx_model` @@ -159,7 +161,8 @@ compile_onnx_model( global_p_error: Optional[float] = None, verbose: bool = False, inputs_encryption_status: Optional[Sequence[str]] = None, - reduce_sum_copy: bool = False + reduce_sum_copy: bool = False, + device: str = 'cpu' ) → QuantizedModule ``` @@ -184,6 +187,7 @@ Take a model in torch, turn it to numpy, quantize its inputs / weights / outputs - `verbose` (bool): whether to show compilation information - `inputs_encryption_status` (Optional\[Sequence\[str\]\]): encryption status ('clear', 'encrypted') for each input. By default all arguments will be encrypted. 
- `reduce_sum_copy` (bool): if the inputs of QuantizedReduceSum should be copied to avoid bit-width propagation +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** @@ -191,7 +195,7 @@ Take a model in torch, turn it to numpy, quantize its inputs / weights / outputs ______________________________________________________________________ - + ## function `compile_brevitas_qat_model` @@ -209,7 +213,8 @@ compile_brevitas_qat_model( output_onnx_file: Union[NoneType, Path, str] = None, verbose: bool = False, inputs_encryption_status: Optional[Sequence[str]] = None, - reduce_sum_copy: bool = False + reduce_sum_copy: bool = False, + device: str = 'cpu' ) → QuantizedModule ``` @@ -232,6 +237,7 @@ The torch_model parameter is a subclass of torch.nn.Module that uses quantized o - `verbose` (bool): whether to show compilation information - `inputs_encryption_status` (Optional\[Sequence\[str\]\]): encryption status ('clear', 'encrypted') for each input. By default all arguments will be encrypted. - `reduce_sum_copy` (bool): if the inputs of QuantizedReduceSum should be copied to avoid bit-width propagation +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. **Returns:** diff --git a/docs/references/api/concrete.ml.torch.hybrid_model.md b/docs/references/api/concrete.ml.torch.hybrid_model.md index da91e7bb2..41508eaa6 100644 --- a/docs/references/api/concrete.ml.torch.hybrid_model.md +++ b/docs/references/api/concrete.ml.torch.hybrid_model.md @@ -72,7 +72,7 @@ Convert all Conv1D layers in a module or a Conv1D layer itself to nn.Linear. ______________________________________________________________________ - + ## class `HybridFHEMode` @@ -80,13 +80,13 @@ Simple enum for different modes of execution of HybridModel. ______________________________________________________________________ - + ## class `RemoteModule` A wrapper class for the modules to be evaluated remotely with FHE. - + ### method `__init__` @@ -102,7 +102,7 @@ __init__( ______________________________________________________________________ - + ### method `forward` @@ -133,7 +133,7 @@ To change the behavior of this forward function one must change the fhe_local_mo ______________________________________________________________________ - + ### method `init_fhe_client` @@ -187,13 +187,17 @@ This is done by converting targeted modules by RemoteModules. This will modify t **Args:** -- `model` (nn.Module): The model to modify (in-place modification) +- `model` (nn.Module): The model to modify (in-place modification). - `module_names` (Union\[str, List\[str\]\]): The module name(s) to replace with FHE server. -- `server_remote_address)`: The remote address of the FHE server -- `model_name` (str): Model name identifier -- `verbose` (int): If logs should be printed when interacting with FHE server +- `server_remote_address` (str): The remote address of the FHE server. +- `model_name` (str): Model name identifier. +- `verbose` (int): If logs should be printed when interacting with FHE server. - +**Raises:** + +- `TypeError`: If the provided model is not an instance of torch.nn.Module. + + ### method `__init__` @@ -201,7 +205,7 @@ This is done by converting targeted modules by RemoteModules. 
This will modify t __init__( model: Module, module_names: Union[str, List[str]], - server_remote_address=None, + server_remote_address: Optional[str] = None, model_name: str = 'model', verbose: int = 0 ) @@ -209,7 +213,7 @@ __init__( ______________________________________________________________________ - + ### method `compile_model` @@ -219,6 +223,7 @@ compile_model( n_bits: Union[int, Dict[str, int]] = 8, rounding_threshold_bits: Optional[int] = None, p_error: Optional[float] = None, + device: str = 'cpu', configuration: Optional[Configuration] = None ) ``` @@ -231,11 +236,33 @@ Compiles the specific layers to FHE. - `n_bits` (int): The bit precision for quantization during FHE model compilation. Default is 8. - `rounding_threshold_bits` (int): The number of bits to use for rounding threshold during FHE model compilation. Default is 8. - `p_error` (float): Error allowed for each table look-up in the circuit. +- `device`: FHE compilation device, can be either 'cpu' or 'cuda'. - `configuration` (Configuration): A concrete Configuration object specifying the FHE encryption parameters. If not specified, a default configuration is used. ______________________________________________________________________ - + + +### method `forward` + +```python +forward(x: Tensor, fhe: str = 'disable') → Tensor +``` + +Forward pass of the hybrid model. + +**Args:** + +- `x` (torch.Tensor): The input tensor. +- `fhe` (str): The Fully Homomorphic Encryption (FHE) mode (default is "disable"). + +**Returns:** + +- `torch.Tensor`: The output tensor. + +______________________________________________________________________ + + ### method `init_client` @@ -255,7 +282,7 @@ Initialize client for all remote modules. ______________________________________________________________________ - + ### method `publish_to_hub` @@ -267,12 +294,12 @@ Allow the user to push the model and FHE required files to HF Hub. ______________________________________________________________________ - + ### method `save_and_clear_private_info` ```python -save_and_clear_private_info(path: Path, via_mlir=False) +save_and_clear_private_info(path: Path, via_mlir=True) ``` Save the PyTorch model to the provided path and also saves the corresponding FHE circuit. @@ -284,7 +311,7 @@ Save the PyTorch model to the provided path and also saves the corresponding FHE ______________________________________________________________________ - + ### method `set_fhe_mode` @@ -300,7 +327,7 @@ Set Hybrid FHE mode for all remote modules. ______________________________________________________________________ - + ## class `LoggerStub` @@ -308,7 +335,7 @@ Placeholder type for a typical logger like the one from loguru. ______________________________________________________________________ - + ### method `info` @@ -324,7 +351,7 @@ Placholder function for logger.info. ______________________________________________________________________ - + ## class `HybridFHEModelServer` @@ -332,7 +359,7 @@ Hybrid FHE Model Server. This is a class object to server FHE models serialized using HybridFHEModel. - + ### method `__init__` @@ -342,7 +369,7 @@ __init__(key_path: Path, model_dir: Path, logger: Optional[LoggerStub]) ______________________________________________________________________ - + ### method `add_key` @@ -365,7 +392,7 @@ Dict\[str, str\] ______________________________________________________________________ - + ### method `check_inputs` @@ -391,7 +418,7 @@ Check that the given configuration exist in the compiled models folder. 
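The client-side flow for `HybridFHEModel` documented above (constructor, `compile_model` with the new `device` argument, `forward`, `save_and_clear_private_info`) can be sketched as follows. The toy MLP and the module name `"fc2"` are illustrative assumptions; the server-side classes around this point handle the deployed artifacts.

```python
from pathlib import Path

import torch

from concrete.ml.torch.hybrid_model import HybridFHEModel


class TinyMLP(torch.nn.Module):
    """Toy model: only `fc2` is selected for remote FHE execution."""

    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(10, 16)
        self.fc2 = torch.nn.Linear(16, 4)

    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))


model = TinyMLP()
hybrid_model = HybridFHEModel(model, module_names="fc2")

# Calibrate and compile the remote module; `device` selects the compilation target
x_calib = torch.randn(32, 10)
hybrid_model.compile_model(x_calib, n_bits=8, device="cpu")

# Local sanity check with FHE disabled before deploying anything
y = hybrid_model.forward(x_calib[:1], fhe="disable")

# Save deployment files and strip the private weights of the remote module
hybrid_model.save_and_clear_private_info(Path("hybrid_deployment"))
```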
______________________________________________________________________ - + ### method `compute` @@ -421,7 +448,7 @@ Compute the circuit over encrypted input. ______________________________________________________________________ - + ### method `dump_key` @@ -438,7 +465,7 @@ Dump a public key to a stream. ______________________________________________________________________ - + ### method `get_circuit` @@ -460,7 +487,7 @@ Get circuit based on model name, module name and input shape. ______________________________________________________________________ - + ### method `get_client` @@ -486,7 +513,7 @@ Get client. ______________________________________________________________________ - + ### method `list_modules` @@ -505,7 +532,7 @@ Dict\[str, Dict\[str, Dict\]\] ______________________________________________________________________ - + ### method `list_shapes` @@ -525,7 +552,7 @@ Dict\[str, Dict\] ______________________________________________________________________ - + ### method `load_key` diff --git a/docs/references/api/concrete.ml.torch.lora.md b/docs/references/api/concrete.ml.torch.lora.md new file mode 100644 index 000000000..7720a6410 --- /dev/null +++ b/docs/references/api/concrete.ml.torch.lora.md @@ -0,0 +1,312 @@ + + + + +# module `concrete.ml.torch.lora` + +This module contains classes for LoRA (Low-Rank Adaptation) training and custom layers. + +## **Global Variables** + +- **LINEAR_LAYERS** + +______________________________________________________________________ + + + +## function `get_remote_names` + +```python +get_remote_names( + model: Module, + include_embedding_layers: bool = False +) → List[str] +``` + +Get names of modules to be executed remotely. + +**Args:** + +- `model` (torch.nn.Module): The model to inspect. +- `include_embedding_layers` (bool): Whether to include embedding layers. + +**Returns:** + +- `List[str]`: List of module names to be executed remotely. + +______________________________________________________________________ + + + +## class `LoraTraining` + +LoraTraining module for fine-tuning with LoRA in a hybrid model setting. + +This class is designed to enable Low-Rank Adaptation (LoRA) fine-tuning in a hybrid model context. It allows selective execution of forward and backward passes in FHE. + +The class replaces standard linear layers with custom layers that are compatible with LoRA and FHE operations. It provides mechanisms to toggle between calibration and optimization modes. + +**Args:** + +- `inference_model` (torch.nn.Module): The base model to be fine-tuned. + + + +### method `__init__` + +```python +__init__(inference_model) → None +``` + +______________________________________________________________________ + + + +### method `forward` + +```python +forward(inputs) +``` + +Forward pass of the LoRA training module. + +**Args:** + +- `inputs`: A tuple containing input tensors and labels. + +**Returns:** +A tuple containing the loss and gradient norm. + +**Raises:** + +- `ValueError`: If the model does not return a loss when `self.loss_fn` is None. + +______________________________________________________________________ + + + +### method `replace_layers_with_custom` + +```python +replace_layers_with_custom(model: Module, skip_first: bool = True) +``` + +Replace linear layers with custom ones. + +This method replaces eligible linear layers in the model with custom layers that are compatible with the LoRA training procedure. + +**Args:** + +- `model` (torch.nn.Module): The model to replace layers in. 
+- `skip_first` (bool): Whether to skip the first eligible layer. + +______________________________________________________________________ + + + +### method `toggle_calibrate` + +```python +toggle_calibrate(enable: bool = True) +``` + +Toggle calibration mode. + +**Args:** + +- `enable` (bool): Whether to enable calibration mode. + +______________________________________________________________________ + + + +### method `toggle_run_optimizer` + +```python +toggle_run_optimizer(enable: bool = True) +``` + +Toggle optimizer execution. + +**Args:** + +- `enable` (bool): Whether to enable optimizer execution. + +______________________________________________________________________ + + + +### method `update_training_parameters` + +```python +update_training_parameters( + optimizer=None, + lr_scheduler=None, + loss_fn=None, + training_args=None +) +``` + +Update training parameters for the LoRA module. + +**Args:** + +- `optimizer` (optional): The optimizer to use for training. +- `lr_scheduler` (optional): The learning rate scheduler to use for training. +- `loss_fn` (callable, optional): Loss function to compute the loss. +- `training_args` (dict or namespace, optional): Training arguments containing 'gradient_accumulation_steps' and 'max_grad_norm'. + +______________________________________________________________________ + + + +## class `ForwardModuleLinear` + +Forward module for linear layers. + + + +### method `__init__` + +```python +__init__(weight, bias=None, weight_transposed=False) +``` + +______________________________________________________________________ + + + +### method `forward` + +```python +forward(input_tensor) +``` + +Forward pass for linear layers. + +**Args:** + +- `input_tensor`: The input tensor. + +**Returns:** +The output tensor after applying the linear transformation. + +______________________________________________________________________ + + + +## class `BackwardModuleLinear` + +Backward module for linear layers. + + + +### method `__init__` + +```python +__init__(weight, weight_transposed=False) +``` + +______________________________________________________________________ + + + +### method `forward` + +```python +forward(grad_output) +``` + +Backward pass for linear layers. + +**Args:** + +- `grad_output`: The gradient output tensor. + +**Returns:** +The gradient input tensor after applying the backward pass. + +______________________________________________________________________ + + + +## class `CustomLinear` + +Custom linear module. + + + +### method `__init__` + +```python +__init__(weight, bias=None, weight_transposed=False) +``` + +______________________________________________________________________ + + + +### method `forward` + +```python +forward(input_tensor) +``` + +Forward pass of the custom linear module. + +**Args:** + +- `input_tensor`: The input tensor. + +**Returns:** +The output tensor after applying the custom linear module. + +______________________________________________________________________ + + + +## class `ForwardBackwardModule` + +Custom autograd function for forward and backward passes. + +______________________________________________________________________ + + + +### method `backward` + +```python +backward(ctx, grad_output) +``` + +Backward pass of the custom autograd function. + +**Args:** + +- `ctx`: The context object. +- `grad_output`: The gradient output tensor. + +**Returns:** +The gradient input tensor after applying the backward pass. 
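Putting the LoRA pieces above together, the sketch below wires `LoraTraining`, `get_remote_names` and `HybridFHEModel` as they are intended to be combined. The toy network, optimizer, loss and training arguments are assumptions for illustration only; real fine-tuning wraps a LoRA-adapted language model (see the LoRA use case examples) and then calibrates and compiles through `hybrid_model.compile_model` with batches shaped as the `(inputs, labels)` tuples expected by `LoraTraining.forward`.

```python
import torch

from concrete.ml.torch.hybrid_model import HybridFHEModel
from concrete.ml.torch.lora import LoraTraining, get_remote_names

# Toy stand-in for a LoRA-adapted model (assumption, not from this reference)
base_model = torch.nn.Sequential(
    torch.nn.Linear(16, 32),
    torch.nn.ReLU(),
    torch.nn.Linear(32, 2),
)

# Wrap the model: eligible linear layers are replaced by FHE-compatible custom layers
lora_training = LoraTraining(base_model)

# In real fine-tuning only the LoRA adapter parameters would be optimized
lora_training.update_training_parameters(
    optimizer=torch.optim.AdamW(base_model.parameters(), lr=1e-4),
    loss_fn=torch.nn.functional.cross_entropy,
    training_args={"gradient_accumulation_steps": 1, "max_grad_norm": 1.0},
)

# The replaced layers are the ones to execute remotely in FHE
remote_names = get_remote_names(lora_training)
hybrid_model = HybridFHEModel(lora_training, module_names=remote_names)

# Calibration and compilation then proceed through `hybrid_model.compile_model`,
# as documented for HybridFHEModel earlier in this reference.
```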
+ +______________________________________________________________________ + + + +### method `forward` + +```python +forward(ctx, input_tensor, forward_module, backward_module) +``` + +Forward pass of the custom autograd function. + +**Args:** + +- `ctx`: The context object. +- `input_tensor`: The input tensor. +- `forward_module`: The forward module. +- `backward_module`: The backward module. + +**Returns:** +The output tensor after applying the forward pass. diff --git a/docs/references/api/concrete.ml.torch.numpy_module.md b/docs/references/api/concrete.ml.torch.numpy_module.md index f955eae02..9ab83cfc7 100644 --- a/docs/references/api/concrete.ml.torch.numpy_module.md +++ b/docs/references/api/concrete.ml.torch.numpy_module.md @@ -50,7 +50,19 @@ Get the ONNX model. ______________________________________________________________________ - +#### property onnx_preprocessing + +Get the ONNX preprocessing. + +.. # noqa: DAR201 + +**Returns:** + +- `_onnx_preprocessing` (onnx.ModelProto): the ONNX preprocessing + +______________________________________________________________________ + + ### method `forward` @@ -67,3 +79,23 @@ Apply a forward pass on args with the equivalent numpy function only. **Returns:** - `Union[numpy.ndarray, Tuple[numpy.ndarray, ...]]`: result of the forward on the given inputs + +______________________________________________________________________ + + + +### method `pre_processing` + +```python +pre_processing(*args: ndarray) → Tuple[ndarray, ] +``` + +Apply a preprocessing pass on args with the equivalent numpy function only. + +**Args:** + +- `*args`: the inputs of the preprocessing function + +**Returns:** + +- `Union[numpy.ndarray, Tuple[numpy.ndarray, ...]]`: result of the preprocessing on the given inputs or the original inputs if no preprocessing function is defined diff --git a/pyproject.toml b/pyproject.toml index a65017b2b..777bd005d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "concrete-ml" -version = "1.6.0" +version = "1.7.0" description = "Concrete ML is an open-source set of tools which aims to simplify the use of fully homomorphic encryption (FHE) for data scientists." license = "BSD-3-Clause-Clear" authors = [ diff --git a/src/concrete/ml/version.py b/src/concrete/ml/version.py index 3e9c1ae30..571b5fa25 100644 --- a/src/concrete/ml/version.py +++ b/src/concrete/ml/version.py @@ -1,4 +1,4 @@ """File to manage the version of the package.""" # Auto-generated by "make set_version" do not modify -__version__ = "1.6.0" +__version__ = "1.7.0" diff --git a/use_case_examples/resnet/README.md b/use_case_examples/resnet/README.md index 42cf5bdf4..379ba1b07 100644 --- a/use_case_examples/resnet/README.md +++ b/use_case_examples/resnet/README.md @@ -103,10 +103,10 @@ CPU machine: 196 cores CPU machine (hp7c from AWS) GPU machine: 8xH100 GPU machine Summary of the ImageNet results: + - Accuracy is evaluated on 100 images - Runtime reported for **one** image - | w&a bits | p_error | Accuracy | Top-5 Accuracy | Runtime | Device | | -------- | ------- | -------- | -------------- | -------------- | ------ | | fp32 | - | 67% | 87% | - | - | @@ -114,7 +114,6 @@ Summary of the ImageNet results: | 6/6 | 0.05 | 55% | 78% | 1 h 31 min | CPU | | 7/7 | 0.05 | **66%** | **87%** | **2 h 12 min** | CPU | - 6/6 `n_bits` configuration: {"model_inputs": 8, "op_inputs": 6, "op_weights": 6, "model_outputs": 9} 7/7 `n_bits` configuration: {"model_inputs": 8, "op_inputs": 7, "op_weights": 7, "model_outputs": 9}
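Finally, tying the CPU/GPU benchmark context above back to the API changes in this release: the `device` argument added to `compile_torch_model` (and to its ONNX and Brevitas counterparts) selects the FHE compilation target. A hedged sketch with an illustrative toy network follows; the network, bit-widths and input set are assumptions.

```python
import torch

from concrete.ml.torch.compile import compile_torch_model


class SmallNet(torch.nn.Module):
    """Toy two-layer network used only to illustrate the compile call."""

    def __init__(self):
        super().__init__()
        self.fc1 = torch.nn.Linear(8, 8)
        self.fc2 = torch.nn.Linear(8, 2)

    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))


torch_inputset = torch.randn(100, 8)

quantized_module = compile_torch_model(
    SmallNet(),
    torch_inputset,
    n_bits=6,
    rounding_threshold_bits=6,
    device="cpu",  # or "cuda" on a GPU-enabled Concrete setup
)

# Simulated inference on the resulting quantized module
y_sim = quantized_module.forward(torch_inputset[:1].numpy(), fhe="simulate")
```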