Finetune_Llama3.py
from geobleu.Report import report_geobleu_dtw_gpt
from unsloth import FastLanguageModel
import torch
import wandb
import os
os.environ["WANDB_PROJECT"] = "HuMob2024cityA"
os.environ["WANDB_LOG_MODEL"] = "checkpoint"
os.environ["WANDB_MODE"] = "offline"
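# Runs logged with WANDB_MODE="offline" are stored locally (by default under ./wandb/)
# and can be uploaded later with the `wandb sync` CLI command.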
max_seq_length = 50000 # Choose any! We auto support RoPE Scaling internally!
dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
# 4-bit pre-quantized models we support, for 4x faster downloading + no OOMs.
fourbit_models = [
    "unsloth/mistral-7b-v0.3-bnb-4bit",      # New Mistral v3, 2x faster!
    "unsloth/mistral-7b-instruct-v0.3-bnb-4bit",
    "unsloth/llama-3-8b-bnb-4bit",           # Llama-3 15 trillion tokens model, 2x faster!
    "unsloth/llama-3-8b-Instruct-bnb-4bit",
    "unsloth/llama-3-70b-bnb-4bit",
    "unsloth/Phi-3-mini-4k-instruct",        # Phi-3, 2x faster!
    "unsloth/Phi-3-medium-4k-instruct",
    "unsloth/mistral-7b-bnb-4bit",
    "unsloth/gemma-7b-bnb-4bit",             # Gemma, 2.2x faster!
]  # More models at https://huggingface.co/unsloth
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/llama-3-8b-Instruct-bnb-4bit",  # Choose ANY! e.g. teknium/OpenHermes-2.5-Mistral-7B
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    # token = "hf_...",  # use one if using gated models like meta-llama/Llama-2-7b-hf
)
"""We now add LoRA adapters so we only need to update 1 to 10% of all parameters!"""
model = FastLanguageModel.get_peft_model(
    model,
    r = 16,  # Choose any number > 0! Suggested 8, 16, 32, 64, 128
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 16,
    lora_dropout = 0,  # Supports any, but = 0 is optimized
    bias = "none",     # Supports any, but = "none" is optimized
    # [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
    use_gradient_checkpointing = "unsloth",  # True or "unsloth" for very long context
    random_state = 3407,
    use_rslora = False,   # We support rank stabilized LoRA
    loftq_config = None,  # And LoftQ
)
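# Optional sanity check (a sketch; assumes the object returned above exposes PEFT's
# print_trainable_parameters(), as PEFT-wrapped models do): confirms that only a small
# fraction of the weights is trainable, as the note above claims.
model.print_trainable_parameters()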
import json
from datasets import Dataset
from unsloth.chat_templates import get_chat_template
def load_custom_dataset(file_path):
    with open(file_path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    # Create a dataset structure suitable for the datasets library
    formatted_data = {
        "conversations": [
            convo["messages"] for convo in data
        ]
    }
    # Convert the dataset into the Hugging Face datasets format
    dataset = Dataset.from_dict(formatted_data)
    return dataset
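# For reference, load_custom_dataset() assumes the JSON file is a list of records, each
# with a "messages" list of {"role", "content"} dicts (inferred from the field accesses
# above; the actual datasetB_*.json files may carry extra fields), e.g.:
# [
#   {"messages": [{"role": "user", "content": "..."},
#                 {"role": "assistant", "content": "..."}]},
#   ...
# ]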
def format_conversations(conversation):
    formatted_convo = []
    for message in conversation:
        formatted_message = {
            "from": message["role"],
            "value": message["content"]
        }
        formatted_convo.append(formatted_message)
    return formatted_convo
def formatting_prompts_func(examples):
    convos = examples["conversations"]
    texts = []
    for convo in convos:
        formatted_convo = format_conversations(convo)
        try:
            text = tokenizer.apply_chat_template(formatted_convo, tokenize=False, add_generation_prompt=False)
            texts.append(text)
        except Exception as e:
            print(f"Error processing conversation: {convo}")
            raise e
    return {"text": texts}
# Initialize the tokenizer with the correct chat template
tokenizer = get_chat_template(
    tokenizer,
    chat_template="llama-3",
    mapping={"role": "from", "content": "value", "user": "human", "assistant": "gpt"},
)
# Load and format the custom dataset
train_dataset = load_custom_dataset("datasets/datasetB_train_0-17599.json")
# train_dataset = train_dataset.shuffle(seed=42)
# train_dataset = train_dataset.select(range(2000, 2100))
train_dataset = train_dataset.map(formatting_prompts_func, batched=True)
val_dataset = load_custom_dataset("datasets/datasetB_eval_17600-21999.json")
val_dataset = val_dataset.select(range(100))
val_dataset = val_dataset.map(formatting_prompts_func, batched=True)
"""<a name="Train"></a>
### Train the model
Now let's use Hugging Face TRL's `SFTTrainer`! More docs here: [TRL SFT docs](https://huggingface.co/docs/trl/sft_trainer). Here we train for a full `num_train_epochs = 3` (evaluating on the held-out split every 100 steps) rather than capping `max_steps`. We also support TRL's `DPOTrainer`!
"""
from trl import SFTTrainer
from transformers import TrainingArguments, EarlyStoppingCallback
from unsloth import is_bfloat16_supported
early_stopping_callback = EarlyStoppingCallback(
    early_stopping_patience=3,
)
trainer = SFTTrainer(
    model = model,
    tokenizer = tokenizer,
    train_dataset = train_dataset,
    eval_dataset = val_dataset,
    dataset_text_field = "text",
    max_seq_length = max_seq_length,
    dataset_num_proc = 8,
    packing = False,  # Can make training 5x faster for short sequences.
    args = TrainingArguments(
        per_device_train_batch_size = 1,
        gradient_accumulation_steps = 4,
        warmup_steps = 5,
        num_train_epochs = 3,
        learning_rate = 2e-3,
        fp16 = not is_bfloat16_supported(),
        bf16 = is_bfloat16_supported(),
        optim = "adamw_8bit",
        weight_decay = 0.01,
        lr_scheduler_type = "cosine_with_restarts",
        seed = 3407,
        output_dir = "outputs",
        report_to = "wandb",
        logging_steps = 1,   # Change if needed
        save_steps = 100,    # Change if needed
        run_name = "B_eval_finetune",  # (Optional)
        eval_strategy = "steps",
        eval_steps = 100,
        per_device_eval_batch_size = 1,
        load_best_model_at_end = True,
        greater_is_better = False,
    ),
    # callbacks=[early_stopping_callback],
)
# Show current memory stats
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")
# Initial finetuning
trainer_stats = trainer.train()
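# (Optional) A small follow-up to the memory-stats block above: report peak reserved
# memory after training using the same torch.cuda API.
used_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
print(f"Peak reserved memory after training = {used_gpu_memory} GB (of {max_memory} GB total).")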
# If you want to continue finetuning from checkpoints
# run = wandb.init(name="B_finetune")
# artifact = run.use_artifact('user/HuMob2024cityA/model-B_eval_finetune:epoch_3.0', type='model')
# artifact_dir = artifact.download()
# trainer.train(resume_from_checkpoint=artifact_dir)
model.save_pretrained("lora_model") # Local saving
# model.push_to_hub("your_name/lora_model", token = "...") # Online saving
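# A minimal inference sketch (not part of the original training flow) using the in-memory
# model/tokenizer. The message uses the "from"/"value" keys and "human" role declared in
# the get_chat_template() mapping above; the prompt below is a placeholder, not HuMob data.
FastLanguageModel.for_inference(model)  # enable Unsloth's fast inference mode
messages = [{"from": "human", "value": "..."}]  # hypothetical prompt
input_ids = tokenizer.apply_chat_template(
    messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(input_ids=input_ids, max_new_tokens=64, use_cache=True)
print(tokenizer.batch_decode(outputs))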