# Quickstart

## Environment

```
pip install vllm          # vllm>=v0.8.5.post1 should work
pip install transformers  # transformers>=4.52.4 should work
```

## Using vLLM to generate

```python
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer


def convert_question_to_messages(question: str):
    # Wrap a raw question in the chat format expected by the chat template.
    messages = [
        {"role": "user",
         "content": question + " Let's think step by step and output the final answer within \\boxed{}."}
    ]
    return messages


model_id = "ethan1115/DeepSearch-1.5B"
tokenizer = AutoTokenizer.from_pretrained(model_id)

sampling_params = SamplingParams(
    temperature=0.6,
    top_p=0.95,
    max_tokens=32768,
)

# Load the model with vLLM on a single GPU.
model = LLM(
    model=model_id,
    tensor_parallel_size=1,
)

# Render the chat messages into a plain-text prompt for vLLM.
prompt = tokenizer.apply_chat_template(
    convert_question_to_messages("Find the sum of all integer bases $b>9$ for which $17_{b}$ is a divisor of $97_{b}$."),
    add_generation_prompt=True,
    tokenize=False,
)

outputs = model.generate({"prompt": prompt}, sampling_params=sampling_params, use_tqdm=False)
response = outputs[0].outputs[0].text
print(response)
```