Commit fcd2f2f
Parent(s): 0a94e09
Update README.md
README.md CHANGED
@@ -15,4 +15,31 @@ USER: {user_message2}
 ASSISTANT: {assistant_message2}<|endoftext|>
 USER: {user_message3}
 ASSISTANT: {assistant_message3}<|endoftext|>
 ```
+
+## Tutorial
+```python
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+tokenizer = AutoTokenizer.from_pretrained("sudy-super/baku-10b-chat")
+model = AutoModelForCausalLM.from_pretrained("sudy-super/baku-10b-chat", device_map="auto", torch_dtype=torch.bfloat16)
+
+raw_prompt = "仕事の熱意を取り戻すためのアイデアを5つ挙げてください。"
+prompt = f"USER:{raw_prompt}\nASSISTANT:"
+
+token_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
+with torch.no_grad():
+    output_ids = model.generate(
+        token_ids.to(model.device),
+        max_new_tokens=100,
+        do_sample=True,
+        temperature=0.8,
+        pad_token_id=tokenizer.pad_token_id,
+        bos_token_id=tokenizer.bos_token_id,
+        eos_token_id=tokenizer.eos_token_id
+    )
+
+result = tokenizer.decode(output_ids.tolist()[0])
+print(result)
+```
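The prompt format above concatenates conversation turns as `USER:`/`ASSISTANT:` pairs, with each completed assistant turn terminated by `<|endoftext|>`; the tutorial's `raw_prompt` asks, in Japanese, for five ideas for regaining enthusiasm for work. Below is a minimal sketch of how a multi-turn prompt could be assembled under that format; the `build_prompt` helper, the example history, and the exact whitespace between turns are illustrative assumptions, not part of the commit.

```python
# Sketch only: assembling a multi-turn prompt in the USER:/ASSISTANT:
# format shown in the README. The helper name, the example history, and
# the newline placement between turns are assumptions, not from the commit.
def build_prompt(history, next_user_message):
    parts = []
    for user_msg, assistant_msg in history:
        # Completed assistant turns end with the <|endoftext|> marker,
        # mirroring the template in the README.
        parts.append(f"USER:{user_msg}\nASSISTANT:{assistant_msg}<|endoftext|>\n")
    # Leave the final ASSISTANT: open so the model generates the reply.
    parts.append(f"USER:{next_user_message}\nASSISTANT:")
    return "".join(parts)

# Hypothetical earlier exchange ("Hello" / "Hello, how can I help you?").
history = [("こんにちは", "こんにちは。どのようにお手伝いできますか?")]
prompt = build_prompt(history, "仕事の熱意を取り戻すためのアイデアを5つ挙げてください。")
```

A prompt built this way drops into the tutorial code unchanged: encode it with `add_special_tokens=False` as above, and `eos_token_id=tokenizer.eos_token_id` stops generation once the model emits its end-of-sequence token.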