# brvm_finetuner/finetune.py
import torch
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
    TrainingArguments,
)
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from trl import SFTTrainer
# 1. Configurations
base_model = "mistralai/Mistral-7B-Instruct-v0.3"
new_model_dir = "./mistral-7b-brvm-finetuned"
output_dir = "./results"
# 2. Device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Utilisation du périphérique: {device}")
if torch.cuda.is_available():
print(f"GPU: {torch.cuda.get_device_name(0)} - "
f"Mémoire: {torch.cuda.get_device_properties(0).total_memory / (1024**3):.2f} GB")
# 3. Dataset
dataset = load_dataset("lamekemal/brvm_finetune")
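# The trainer below expects "train" and "validation" splits. If the dataset only
# ships a "train" split, a holdout can be carved out manually, e.g. (assumption:
# a 5% split is enough for tracking eval_loss):
# split = dataset["train"].train_test_split(test_size=0.05, seed=42)
# dataset["train"] = split["train"]
# dataset["validation"] = split["test"]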
# 4. Quantization
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=False,
)
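# NF4 stores the frozen base weights in 4 bits (QLoRA-style), so the 7B model fits
# on a single GPU; forward/backward compute still runs in bfloat16.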
# 5. Load model + tokenizer
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)
model.config.use_cache = False
model = prepare_model_for_kbit_training(model)
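# prepare_model_for_kbit_training freezes the quantized base weights, enables
# gradient checkpointing, and upcasts a few small layers (e.g. layer norms)
# to full precision for numerical stability during k-bit training.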
tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
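# If the "messages" column holds chat turns (a list of {"role", "content"} dicts)
# rather than plain text, flatten it first so dataset_text_field can consume it.
# Sketch, assuming that column layout:
# def to_text(example):
#     return {"text": tokenizer.apply_chat_template(example["messages"], tokenize=False)}
# dataset = dataset.map(to_text)
# ...then pass dataset_text_field="text" to SFTTrainer instead of "messages".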
# 6. LoRA config
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
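# Optional: report how many parameters the LoRA adapter actually trains
# (typically well under 1% of the 7B base model):
# model.print_trainable_parameters()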
# 7. Training args
training_args = TrainingArguments(
    output_dir=output_dir,
    num_train_epochs=3,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=4,
    optim="paged_adamw_32bit",
    save_steps=100,
    logging_steps=10,
    learning_rate=2e-4,
    fp16=False,
    bf16=torch.cuda.is_available(),
    max_grad_norm=0.3,
    warmup_ratio=0.03,
    group_by_length=True,
    lr_scheduler_type="cosine",
    report_to="tensorboard",
    evaluation_strategy="steps",
    eval_steps=100,
    save_total_limit=2,
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
)
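# Effective batch size = per_device_train_batch_size * gradient_accumulation_steps = 8 per GPU.
# Note: newer transformers releases rename evaluation_strategy to eval_strategy;
# keep whichever spelling matches the installed version.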
# 8. Trainer
trainer = SFTTrainer(
    model=model,
    train_dataset=dataset["train"],
    eval_dataset=dataset["validation"],
    peft_config=lora_config,
    dataset_text_field="messages",  # ⚠️ make sure your dataset actually has this column
    max_seq_length=512,
    tokenizer=tokenizer,
    args=training_args,
    packing=False,
)
# 9. Fine-tuning
trainer.train()
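# Training and eval curves land under output_dir and can be viewed with, e.g.:
#   tensorboard --logdir ./results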
# 10. Save locally
trainer.save_model(new_model_dir)
print(f"✅ Modèle LoRA sauvegardé localement dans {new_model_dir}")