Commit cb1de4a

chore: clean LoraMLP.ipynb

jfrery committed Sep 26, 2024
1 parent 4d2f2e6 commit cb1de4a
Showing 1 changed file with 24 additions and 25 deletions.

docs/advanced_examples/LoraMLP.ipynb (49 changes: 24 additions & 25 deletions)
@@ -15,16 +15,16 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<torch._C.Generator at 0x750a601abe10>"
"<torch._C.Generator at 0x7e237be93e10>"
]
},
"execution_count": 1,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@@ -39,8 +39,7 @@
"import torch\n",
"from peft import LoraConfig, get_peft_model\n",
"from sklearn.datasets import make_circles, make_moons\n",
"from torch import nn\n",
"from torch.optim import optim\n",
"from torch import nn, optim\n",
"from torch.utils.data import DataLoader, TensorDataset\n",
"from tqdm import tqdm\n",
"\n",
@@ -64,7 +63,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 11,
"metadata": {},
"outputs": [
{
@@ -126,7 +125,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 12,
"metadata": {},
"outputs": [
{
@@ -248,7 +247,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
@@ -269,7 +268,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
@@ -298,14 +297,14 @@
")\n",
"\n",
"# Calibrate and compile the model\n",
"hybrid_model.model.toggle_calibrate(enable=True)\n",
"lora_training.toggle_calibrate(enable=True)\n",
"hybrid_model.compile_model(inputset, n_bits=8)\n",
"hybrid_model.model.toggle_calibrate(enable=False)"
"lora_training.toggle_calibrate(enable=False)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 15,
"metadata": {},
"outputs": [
{
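The calibration change above moves the toggle onto the LoRA training object (`lora_training`) instead of reaching through `hybrid_model.model`. A minimal sketch of the calibrate-and-compile flow, using only names that appear in this diff:

    # Record calibration statistics while running the inputset, then
    # compile the FHE circuits with 8-bit quantization.
    lora_training.toggle_calibrate(enable=True)
    hybrid_model.compile_model(inputset, n_bits=8)
    lora_training.toggle_calibrate(enable=False)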
@@ -351,9 +350,9 @@
" fhe (str): FHE mode ('disable', 'simulate', or 'execute').\n",
" \"\"\"\n",
" device = torch.device(\"cpu\")\n",
" hybrid_model.model.to(device)\n",
" hybrid_model.model.inference_model.train()\n",
" hybrid_model.model.toggle_run_optimizer(enable=True)\n",
" lora_training.to(device)\n",
" peft_model.train()\n",
" lora_training.toggle_run_optimizer(enable=True)\n",
"\n",
" # Create the main epoch progress bar\n",
" epoch_pbar = tqdm(range(num_epochs), desc=\"Training\", unit=\"epoch\")\n",
@@ -402,7 +401,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 16,
"metadata": {},
"outputs": [
{
@@ -418,15 +417,15 @@
],
"source": [
"# Enable LoRA adapters (already enabled by default)\n",
"hybrid_model.model.inference_model.enable_adapter_layers()\n",
"peft_model.enable_adapter_layers()\n",
"\n",
"# Plot datasets with decision boundaries after fine-tuning\n",
"plot_datasets_and_boundaries(\n",
" X_task1.numpy(),\n",
" y_task1.numpy(),\n",
" X_task2.numpy(),\n",
" y_task2.numpy(),\n",
" model=hybrid_model.model.inference_model,\n",
" model=peft_model,\n",
" titles=[\"Task 1 after Fine-tuning\", \"Task 2 after Fine-tuning\"],\n",
")"
]
@@ -440,7 +439,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 17,
"metadata": {},
"outputs": [
{
@@ -456,15 +455,15 @@
],
"source": [
"# Disable LoRA adapters\n",
"hybrid_model.model.inference_model.disable_adapter_layers()\n",
"peft_model.disable_adapter_layers()\n",
"\n",
"# Plot datasets with decision boundaries after fine-tuning\n",
"plot_datasets_and_boundaries(\n",
" X_task1.numpy(),\n",
" y_task1.numpy(),\n",
" X_task2.numpy(),\n",
" y_task2.numpy(),\n",
" model=hybrid_model.model.inference_model,\n",
" model=peft_model,\n",
" titles=[\"Task 1 after Fine-tuning\", \"Task 2 after Fine-tuning\"],\n",
")"
]
@@ -478,7 +477,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 18,
"metadata": {},
"outputs": [
{
@@ -492,10 +491,10 @@
],
"source": [
"# Enable LoRA adapters (already enabled by default)\n",
"hybrid_model.model.inference_model.enable_adapter_layers()\n",
"peft_model.enable_adapter_layers()\n",
"\n",
"# Print trainable (lora) parameters\n",
"hybrid_model.model.inference_model.print_trainable_parameters()\n",
"peft_model.print_trainable_parameters()\n",
"\n",
"# Save the model and remove all layers that will be done on the server\n",
"path = Path(\"lora_mlp\")\n",
@@ -506,7 +505,7 @@
"hybrid_model.save_and_clear_private_info(path)\n",
"\n",
"# At this point, the hybrid_model only contains the trainable parameters of the LoRA layers.\n",
"hybrid_model.model.inference_model.print_trainable_parameters()"
"peft_model.print_trainable_parameters()"
]
},
{
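In the saving cell above, `print_trainable_parameters()` is the standard PEFT helper reporting trainable (LoRA) versus total parameter counts, and `save_and_clear_private_info(path)` persists the model while stripping the layers that will run on the server. A minimal sketch of the flow using the names in this diff (the mkdir step is an assumption for a fresh run):

    from pathlib import Path

    peft_model.print_trainable_parameters()  # e.g. "trainable params: N || all params: M"

    path = Path("lora_mlp")
    path.mkdir(parents=True, exist_ok=True)  # assumed: ensure the target directory exists
    hybrid_model.save_and_clear_private_info(path)

    # After this call, only the trainable LoRA parameters remain client-side.
    peft_model.print_trainable_parameters()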
