Update handler.py
handler.py (+2 -2)
@@ -13,7 +13,7 @@ def preprocessing(data):
         if len(data[i:]) > 4000:
             string = str(data[i:i+4000])
             texts.append(string)
-            i = i +
+            i = i + 3800
         else:
             string = str(data[i:])
             texts.append(string)
@@ -37,7 +37,7 @@ class EndpointHandler:
 
         # process input
         texts = preprocessing(inputs)
-        inputs = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True
+        inputs = self.tokenizer(texts, return_tensors="pt", padding=True, ) # truncation=True
 
         with torch.no_grad():
             output = self.model.generate(input_ids=inputs["input_ids"].to(device), max_new_tokens=60, do_sample=True, top_p=0.9)
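For context, the chunking branch changed in the first hunk slices the input into 4000-character windows but advances the cursor by only 3800, so consecutive chunks overlap by 200 characters. Below is a minimal sketch of how the full function might look; the while-loop scaffolding, the `texts`/`i` initialisation, and the return value are assumptions, since only file lines 13-19 appear in the diff.

```python
def preprocessing(data):
    # Sketch only: loop, initialisation, and return are assumed;
    # the diff shows just the if/else chunking branch.
    texts = []
    i = 0
    while True:
        if len(data[i:]) > 4000:
            # Take a 4000-character window...
            string = str(data[i:i + 4000])
            texts.append(string)
            # ...but step forward by 3800, leaving a 200-character
            # overlap between consecutive chunks.
            i = i + 3800
        else:
            # Tail shorter than the window: append it and stop.
            string = str(data[i:])
            texts.append(string)
            break
    return texts
```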
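The second hunk sits inside `EndpointHandler.__call__`, where the chunks are tokenized as a padded batch (the commit drops `truncation=True`, leaving it commented out) and fed to `generate`. A minimal sketch of the surrounding handler follows, assuming the usual Inference Endpoints layout; the model/tokenizer classes, checkpoint path, payload shape, `device` setup, and the decode step are assumptions not shown in the diff.

```python
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer  # assumed model class

device = "cuda" if torch.cuda.is_available() else "cpu"  # assumed; `device` is referenced in the diff


class EndpointHandler:
    def __init__(self, path=""):
        # Assumed: load tokenizer and model from the repository path.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(path).to(device)

    def __call__(self, data):
        inputs = data.get("inputs", "")  # assumed payload shape

        # process input
        texts = preprocessing(inputs)
        # Padded batch of chunks; truncation stays disabled, as in the commit.
        inputs = self.tokenizer(texts, return_tensors="pt", padding=True)  # truncation=True

        with torch.no_grad():
            output = self.model.generate(
                input_ids=inputs["input_ids"].to(device),
                max_new_tokens=60,
                do_sample=True,
                top_p=0.9,
            )

        # Assumed: decode each generated sequence back to text.
        return [{"generated_text": self.tokenizer.decode(o, skip_special_tokens=True)}
                for o in output]
```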