# finetune_full.py
import torch
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling, TrainingArguments, Trainer
import os

os.environ["OMP_NUM_THREADS"] = "8"

base_model = "mistralai/Mistral-7B-Instruct-v0.3"
new_model_dir = "./mistral-7b-brvm-full-finetuned"
output_dir = "./results_full"

# 1. Dataset
dataset = load_dataset("lamekemal/brvm_finetune")
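# The tokenization step below expects the dataset to expose "instruction" and "response" columns.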

# 2. Load model + tokenizer in FP16
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
model.config.use_cache = False
model.gradient_checkpointing_enable()

tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
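# Mistral's tokenizer ships without a dedicated pad token, so EOS is reused for padding.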

# 3. Preprocessing
def tokenize_function(examples):
    texts = [
        f"Instruction: {instr}\nRéponse: {resp}"
        for instr, resp in zip(examples["instruction"], examples["response"])
    ]
    return tokenizer(
        texts,
        truncation=True,
        padding="max_length",
        max_length=512,
    )

tokenized_datasets = dataset.map(tokenize_function, batched=True)

# 4. Training arguments
training_args = TrainingArguments(
    output_dir=output_dir,
    num_train_epochs=3,
    per_device_train_batch_size=4,   # full fine-tune is VRAM-heavy
    gradient_accumulation_steps=4,
    optim="adamw_torch_fused",
    save_steps=100,
    logging_steps=10,
    learning_rate=2e-5,
    fp16=True,
    max_grad_norm=1.0,
    warmup_ratio=0.03,
    lr_scheduler_type="cosine",
    report_to="tensorboard",
    eval_strategy="steps",
    eval_steps=100,
    save_total_limit=2,
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
)
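# Effective train batch size per device: 4 (per_device) x 4 (grad accumulation) = 16.
# A full FP16 fine-tune also keeps gradients and optimizer states for all 7B
# parameters in memory, so VRAM needs go well beyond the weights themselves.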

# Data collator for causal LM: copies input_ids into labels so the Trainer can
# compute a loss (the tokenized dataset has no "labels" column on its own)
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

# 5. Standard Trainer (no LoRA)
trainer = Trainer(
    model=model,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],  # assumes the dataset provides a "validation" split
    data_collator=data_collator,
    args=training_args,
)

# 6. Training
trainer.train()

# 7. Save locally and push to the Hub
trainer.save_model(new_model_dir)
tokenizer.save_pretrained(new_model_dir)

# Push the model and tokenizer to the target Hub repo
repo_id = "lamekemal/mistral-7b-brvm-full-finetuned"
model.push_to_hub(repo_id)
tokenizer.push_to_hub(repo_id)

print(f"✅ Full fine-tune sauvegardé dans {new_model_dir} et poussé sur Hugging Face Hub")