youtiva-123 committed
Commit 54e7dff · verified · 1 Parent(s): a7c1228

Create app.py

Files changed (1)
  1. app.py +48 -0
app.py ADDED
@@ -0,0 +1,48 @@
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from peft import PeftModel
+ import torch
+ from huggingface_hub import login
+
+ def merge_model(hf_token):
+     try:
+         login(token=hf_token)
+         yield "✓ Logged in successfully\n"
+
+         yield "Loading base model (this takes 2-3 min)...\n"
+         base_model = AutoModelForCausalLM.from_pretrained(
+             "HuggingFaceH4/zephyr-7b-beta",
+             torch_dtype=torch.float16,
+             device_map="auto",
+             low_cpu_mem_usage=True
+         )
+         yield "✓ Base model loaded\n"
+
+         yield "Loading tokenizer...\n"
+         tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
+         yield "✓ Tokenizer loaded\n"
+
+         yield "Loading your adapter...\n"
+         model = PeftModel.from_pretrained(base_model, "youtiva-123/autotrain-r6w6w-7pv2i")
+         yield "✓ Adapter loaded\n"
+
+         yield "Merging adapter with base model...\n"
+         merged_model = model.merge_and_unload()
+         yield "✓ Models merged\n"
+
+         yield "Uploading to youtiva-123/zephyr-youtiva-merged...\n"
+         merged_model.push_to_hub("youtiva-123/zephyr-youtiva-merged")
+         tokenizer.push_to_hub("youtiva-123/zephyr-youtiva-merged")
+
+         yield "✅ SUCCESS! Your merged model is ready at:\nhttps://huggingface.co/youtiva-123/zephyr-youtiva-merged\n\nNow go deploy this model as an endpoint!"
+     except Exception as e:
+         yield f"❌ Error: {str(e)}"
+
+ interface = gr.Interface(
+     fn=merge_model,
+     inputs=gr.Textbox(label="Hugging Face Token (needs write access)", type="password"),
+     outputs=gr.Textbox(label="Status", lines=15),
+     title="Merge LoRA Adapter with Base Model"
+ )
+
+ interface.launch()
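
Note: once this Space has run, the merged weights pushed by the push_to_hub calls above can be loaded like any full model from youtiva-123/zephyr-youtiva-merged. A minimal sketch of that downstream usage (not part of this commit; the prompt and generation settings are illustrative):

# Sketch: consume the merged model produced by this Space (not part of app.py).
# Repo id comes from the push_to_hub calls above; sampling settings are assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "youtiva-123/zephyr-youtiva-merged"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.float16,
    device_map="auto",
)

prompt = "Hello, how can you help me?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=100, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))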