chore: fix tests and refresh LoraMLP notebook
jfrery committed Nov 15, 2024
1 parent 39a0004 commit a205373
Showing 4 changed files with 62 additions and 61 deletions.
88 changes: 44 additions & 44 deletions docs/advanced_examples/LoraMLP.ipynb
@@ -21,7 +21,7 @@
{
"data": {
"text/plain": [
"<torch._C.Generator at 0x7fa754b0e250>"
"<torch._C.Generator at 0x7ffa268f2530>"
]
},
"execution_count": 1,
@@ -324,176 +324,176 @@
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"\r",
"Training: 0%| | 0/10 [00:00<?, ?epoch/s]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 0%| | 0/10 [00:30<?, ?epoch/s, Avg Loss=2.3775, Time=30.94s, FHE Mode=execute]"
"\r",
"Training: 0%| | 0/10 [00:34<?, ?epoch/s, Epoch=1, Avg Loss=2.3775, Time=34.38s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 10%|█ | 1/10 [00:30<04:38, 30.95s/epoch, Avg Loss=2.3775, Time=30.94s, FHE Mode=execute]"
"\r",
"Training: 10%|█ | 1/10 [00:34<05:09, 34.38s/epoch, Epoch=1, Avg Loss=2.3775, Time=34.38s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 10%|█ | 1/10 [01:01<04:38, 30.95s/epoch, Avg Loss=1.6292, Time=30.71s, FHE Mode=execute]"
"\r",
"Training: 10%|█ | 1/10 [01:07<05:09, 34.38s/epoch, Epoch=2, Avg Loss=1.6292, Time=32.99s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 20%|██ | 2/10 [01:01<04:06, 30.81s/epoch, Avg Loss=1.6292, Time=30.71s, FHE Mode=execute]"
"\r",
"Training: 20%|██ | 2/10 [01:07<04:28, 33.56s/epoch, Epoch=2, Avg Loss=1.6292, Time=32.99s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 20%|██ | 2/10 [01:32<04:06, 30.81s/epoch, Avg Loss=0.8214, Time=30.98s, FHE Mode=execute]"
"\r",
"Training: 20%|██ | 2/10 [01:39<04:28, 33.56s/epoch, Epoch=3, Avg Loss=0.8214, Time=31.86s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 30%|███ | 3/10 [01:32<03:36, 30.89s/epoch, Avg Loss=0.8214, Time=30.98s, FHE Mode=execute]"
"\r",
"Training: 30%|███ | 3/10 [01:39<03:49, 32.79s/epoch, Epoch=3, Avg Loss=0.8214, Time=31.86s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 30%|███ | 3/10 [02:03<03:36, 30.89s/epoch, Avg Loss=0.5415, Time=30.74s, FHE Mode=execute]"
"\r",
"Training: 30%|███ | 3/10 [02:10<03:49, 32.79s/epoch, Epoch=4, Avg Loss=0.5415, Time=31.45s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 40%|████ | 4/10 [02:03<03:04, 30.83s/epoch, Avg Loss=0.5415, Time=30.74s, FHE Mode=execute]"
"\r",
"Training: 40%|████ | 4/10 [02:10<03:13, 32.26s/epoch, Epoch=4, Avg Loss=0.5415, Time=31.45s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 40%|████ | 4/10 [02:34<03:04, 30.83s/epoch, Avg Loss=0.3884, Time=30.87s, FHE Mode=execute]"
"\r",
"Training: 40%|████ | 4/10 [02:42<03:13, 32.26s/epoch, Epoch=5, Avg Loss=0.3884, Time=31.78s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 50%|█████ | 5/10 [02:34<02:34, 30.85s/epoch, Avg Loss=0.3884, Time=30.87s, FHE Mode=execute]"
"\r",
"Training: 50%|█████ | 5/10 [02:42<02:40, 32.09s/epoch, Epoch=5, Avg Loss=0.3884, Time=31.78s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 50%|█████ | 5/10 [03:05<02:34, 30.85s/epoch, Avg Loss=0.3246, Time=30.80s, FHE Mode=execute]"
"\r",
"Training: 50%|█████ | 5/10 [03:14<02:40, 32.09s/epoch, Epoch=6, Avg Loss=0.3246, Time=32.02s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 60%|██████ | 6/10 [03:05<02:03, 30.83s/epoch, Avg Loss=0.3246, Time=30.80s, FHE Mode=execute]"
"\r",
"Training: 60%|██████ | 6/10 [03:14<02:08, 32.07s/epoch, Epoch=6, Avg Loss=0.3246, Time=32.02s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 60%|██████ | 6/10 [03:35<02:03, 30.83s/epoch, Avg Loss=0.3145, Time=30.63s, FHE Mode=execute]"
"\r",
"Training: 60%|██████ | 6/10 [03:45<02:08, 32.07s/epoch, Epoch=7, Avg Loss=0.3145, Time=31.47s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 70%|███████ | 7/10 [03:35<01:32, 30.77s/epoch, Avg Loss=0.3145, Time=30.63s, FHE Mode=execute]"
"\r",
"Training: 70%|███████ | 7/10 [03:45<01:35, 31.87s/epoch, Epoch=7, Avg Loss=0.3145, Time=31.47s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 70%|███████ | 7/10 [04:06<01:32, 30.77s/epoch, Avg Loss=0.2942, Time=30.63s, FHE Mode=execute]"
"\r",
"Training: 70%|███████ | 7/10 [04:17<01:35, 31.87s/epoch, Epoch=8, Avg Loss=0.2942, Time=31.38s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 80%|████████ | 8/10 [04:06<01:01, 30.73s/epoch, Avg Loss=0.2942, Time=30.63s, FHE Mode=execute]"
"\r",
"Training: 80%|████████ | 8/10 [04:17<01:03, 31.72s/epoch, Epoch=8, Avg Loss=0.2942, Time=31.38s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 80%|████████ | 8/10 [04:36<01:01, 30.73s/epoch, Avg Loss=0.2913, Time=30.59s, FHE Mode=execute]"
"\r",
"Training: 80%|████████ | 8/10 [04:49<01:03, 31.72s/epoch, Epoch=9, Avg Loss=0.2913, Time=31.65s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 90%|█████████ | 9/10 [04:36<00:30, 30.68s/epoch, Avg Loss=0.2913, Time=30.59s, FHE Mode=execute]"
"\r",
"Training: 90%|█████████ | 9/10 [04:49<00:31, 31.70s/epoch, Epoch=9, Avg Loss=0.2913, Time=31.65s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 90%|█████████ | 9/10 [05:06<00:30, 30.68s/epoch, Avg Loss=0.2978, Time=29.99s, FHE Mode=execute]"
"\r",
"Training: 90%|█████████ | 9/10 [05:20<00:31, 31.70s/epoch, Epoch=10, Avg Loss=0.2978, Time=31.63s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 100%|██████████| 10/10 [05:06<00:00, 30.47s/epoch, Avg Loss=0.2978, Time=29.99s, FHE Mode=execute]"
"\r",
"Training: 100%|██████████| 10/10 [05:20<00:00, 31.68s/epoch, Epoch=10, Avg Loss=0.2978, Time=31.63s, FHE Mode=execute]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\r\n",
"Training: 100%|██████████| 10/10 [05:06<00:00, 30.69s/epoch, Avg Loss=0.2978, Time=29.99s, FHE Mode=execute]"
"\r",
"Training: 100%|██████████| 10/10 [05:20<00:00, 32.06s/epoch, Epoch=10, Avg Loss=0.2978, Time=31.63s, FHE Mode=execute]"
]
},
{
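The refreshed notebook output above differs from the previous run mainly in that the progress bar now reports an explicit `Epoch` field alongside the average loss, per-epoch time, and FHE mode. Below is a minimal sketch of how such a postfix could be produced with `tqdm`; `train_one_epoch` and `EPOCHS` are hypothetical stand-ins for the notebook's actual training loop, not code from the notebook itself.

```python
import time
from tqdm import tqdm

def train_one_epoch(epoch: int) -> float:
    """Hypothetical stand-in for one training epoch; returns the average loss."""
    time.sleep(0.1)  # placeholder for the ~30 s per-epoch FHE execution
    return 2.38 / epoch

EPOCHS = 10
with tqdm(total=EPOCHS, desc="Training", unit="epoch") as pbar:
    for epoch in range(1, EPOCHS + 1):
        start = time.time()
        avg_loss = train_one_epoch(epoch)
        # The refreshed output includes the epoch index in the postfix
        pbar.set_postfix(
            {
                "Epoch": epoch,
                "Avg Loss": f"{avg_loss:.4f}",
                "Time": f"{time.time() - start:.2f}s",
                "FHE Mode": "execute",
            }
        )
        pbar.update(1)
```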
17 changes: 9 additions & 8 deletions src/concrete/ml/torch/hybrid_model.py
@@ -389,7 +389,7 @@ def __init__(
def _replace_modules(self):
"""Replace the private modules in the model with remote layers."""

self._has_large_linear_layers = True
self._has_only_large_linear_layers = True
for module_name in self.module_names:
# FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3858
# Conv1d introduce reshaping operations which adds more TLU
@@ -417,7 +417,7 @@ def _replace_modules(self):
)

if not is_pure_linear_layer:
self._has_large_linear_layers = False
self._has_only_large_linear_layers = False

for module_name in self.module_names:
# Create the optimized glwe linear layer executor if needed
@@ -427,7 +427,7 @@
module_name=module_name,
model_name=self.model_name,
verbose=self.verbose,
optimized_linear_execution=(self._has_large_linear_layers),
optimized_linear_execution=(self._has_only_large_linear_layers),
)

self.remote_modules[module_name] = remote_module
@@ -457,7 +457,7 @@ def forward(self, x: torch.Tensor, fhe: str = "disable") -> torch.Tensor:
# Validate the FHE mode
fhe_mode = HybridFHEMode(fhe)

if _HAS_GLWE_BACKEND and self._has_large_linear_layers:
if _HAS_GLWE_BACKEND and self._has_only_large_linear_layers:
if fhe_mode == HybridFHEMode.SIMULATE:
raise AssertionError(
"When the HybridFHEModel is instantiated with only "
@@ -468,9 +468,10 @@ def forward(self, x: torch.Tensor, fhe: str = "disable") -> torch.Tensor:
# Initialize executor only if not already done
if self.executor is None:
self.executor = GLWELinearLayerExecutor()
# Generate keys only if needed and not already done
if fhe_mode != HybridFHEMode.DISABLE:
self.executor.keygen()

# Generate keys only if needed and not already done
if fhe_mode != HybridFHEMode.DISABLE and self.executor.private_key is None:
self.executor.keygen()

# Update executor for all remote modules
for module in self.remote_modules.values():
@@ -580,7 +581,7 @@ def compile_model(
# If all layers are linear and the GLWE backend is available
# then simply quantize the model without compiling with
# Concrete Python.
if self._has_large_linear_layers and _HAS_GLWE_BACKEND:
if self._has_only_large_linear_layers and _HAS_GLWE_BACKEND:
self.private_q_modules[name] = build_quantized_module(
self.private_modules[name],
calibration_data_tensor,
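Beyond renaming `_has_large_linear_layers` to `_has_only_large_linear_layers`, the forward-path change above makes key generation idempotent: keys are generated only when FHE is actually enabled and the executor does not already hold a private key. A minimal sketch of that caching pattern follows; `GLWEExecutorSketch` and `ensure_keys` are hypothetical stand-ins used for illustration, not the library's real classes.

```python
from typing import Optional

class GLWEExecutorSketch:
    """Toy stand-in for a GLWE executor, used to illustrate keygen caching."""

    def __init__(self) -> None:
        self.private_key: Optional[bytes] = None

    def keygen(self) -> None:
        # In the real executor this would be an expensive FHE key generation step
        self.private_key = b"hypothetical-private-key"

def ensure_keys(executor: Optional[GLWEExecutorSketch], fhe_mode: str) -> GLWEExecutorSketch:
    """Create the executor once and generate keys only when needed."""
    if executor is None:
        executor = GLWEExecutorSketch()
    # Mirrors the new condition: skip keygen when FHE is disabled
    # or when a private key has already been generated.
    if fhe_mode != "disable" and executor.private_key is None:
        executor.keygen()
    return executor

executor = None
executor = ensure_keys(executor, fhe_mode="execute")  # generates keys
executor = ensure_keys(executor, fhe_mode="execute")  # reuses the cached key
```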
2 changes: 1 addition & 1 deletion tests/torch/test_hybrid_converter.py
@@ -328,7 +328,7 @@ def prepare_data(x, y, test_size=0.1, random_state=42):
# were linear and were replaced with the GLWE backend
# Check if GLWE optimization should be used based on input dimension
should_use_glwe = n_hidden >= 512
is_pure_linear = hybrid_local._has_large_linear_layers # pylint: disable=protected-access
is_pure_linear = hybrid_local._has_only_large_linear_layers # pylint: disable=protected-access
assert is_pure_linear == should_use_glwe

hybrid_local.compile_model(x1_train, n_bits=10)
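The updated assertion reads the renamed flag. As a rough illustration of what that flag encodes, the sketch below applies the same kind of check the test relies on: the optimized GLWE path is taken only when every remote layer is a plain `torch.nn.Linear` whose dimensions reach the size threshold used in this test (512). The helper name and the threshold constant are illustrative assumptions, not the library's exact eligibility rule.

```python
from typing import List

import torch

# Threshold taken from the test above; treat it as illustrative only.
GLWE_MIN_DIM = 512

def has_only_large_linear_layers(modules: List[torch.nn.Module]) -> bool:
    """Return True when every module is a pure Linear layer large enough
    for the optimized GLWE backend (a sketch of what the flag encodes)."""
    return all(
        isinstance(m, torch.nn.Linear)
        and m.in_features >= GLWE_MIN_DIM
        and m.out_features >= GLWE_MIN_DIM
        for m in modules
    )

# Mirrors the test's expectation for a hidden size of 512 versus a smaller one
assert has_only_large_linear_layers([torch.nn.Linear(512, 512)])
assert not has_only_large_linear_layers([torch.nn.Linear(64, 64)])
```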
16 changes: 8 additions & 8 deletions use_case_examples/deployment/README.md
@@ -7,26 +7,26 @@ This folder contains examples of how to deploy Concrete ML models using Fully Ho
The deployment process generally follows these steps:

1. Train the model (optional, depending on the use case)
2. Compile the model to an FHE circuit
3. Deploy the model using Docker
4. Run inference using a client (locally or in Docker)
1. Compile the model to an FHE circuit
1. Deploy the model using Docker
1. Run inference using a client (locally or in Docker)

## Available Examples

We provide three different use cases to demonstrate the deployment process:

1. [Breast Cancer Classification](./breast_cancer/README.md)
2. [Sentiment Analysis](./sentiment_analysis/README.md)
3. [CIFAR-10 Image Classification](./cifar/README.md)
1. [Sentiment Analysis](./sentiment_analysis/README.md)
1. [CIFAR-10 Image Classification](./cifar/README.md)

## Getting Started

Each example folder contains its own README with specific instructions. However, the general process is similar:

1. Train or compile the model using the provided scripts
2. Deploy the model using `deploy_to_docker.py` from the `server` folder
3. Build the client Docker image
4. Run the client to interact with the deployed model
1. Deploy the model using `deploy_to_docker.py` from the `server` folder
1. Build the client Docker image
1. Run the client to interact with the deployed model

For detailed instructions, please refer to the README in each example folder.
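The Docker scripts referenced in the steps above wrap a client/server exchange: the server holds the compiled model, while the client generates keys, encrypts inputs, and decrypts results. Below is a minimal local sketch of that exchange using the class and method names from Concrete ML's client/server deployment API as documented; the model choice, the `fhe_artifacts` and `keys` directories, and the toy data are placeholders, and the exact signatures should be checked against the installed Concrete ML version.

```python
# Local sketch of the client/server round trip that the Docker images wrap.
import numpy as np
from concrete.ml.deployment import FHEModelClient, FHEModelDev, FHEModelServer
from concrete.ml.sklearn import LogisticRegression

# Toy training data and a small compiled model (placeholder for the real use case)
X = np.random.rand(100, 4)
y = (X[:, 0] > 0.5).astype(int)
model = LogisticRegression(n_bits=8)
model.fit(X, y)
model.compile(X)

# "Server" side: save the compiled artifacts (deploy_to_docker.py ships these in an image)
FHEModelDev(path_dir="fhe_artifacts", model=model).save()
server = FHEModelServer(path_dir="fhe_artifacts")
server.load()

# "Client" side: generate keys, encrypt an input, send it, then decrypt the reply
client = FHEModelClient(path_dir="fhe_artifacts", key_dir="keys")
client.generate_private_and_evaluation_keys()
evaluation_keys = client.get_serialized_evaluation_keys()
encrypted_input = client.quantize_encrypt_serialize(X[:1])

encrypted_prediction = server.run(encrypted_input, evaluation_keys)
prediction = client.deserialize_decrypt_dequantize(encrypted_prediction)
print(prediction)
```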

