Update README.md

README.md CHANGED

@@ -59,8 +59,8 @@ Sample usage:
 ```python
 from transformers import AutoModel, AutoTokenizer
 
-tokenizer = AutoTokenizer.from_pretrained('dicta-il/dictabert-parse')
-model = AutoModel.from_pretrained('dicta-il/dictabert-parse', trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained('dicta-il/dictabert-large-parse')
+model = AutoModel.from_pretrained('dicta-il/dictabert-large-parse', trust_remote_code=True)
 
 model.eval()
 
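For reference, a minimal end-to-end sketch of how the renamed checkpoint would be exercised. The `model.predict(..., output_style='json')` call and the example sentence are assumptions based on the sample-usage section this hunk edits, not part of the change itself:

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('dicta-il/dictabert-large-parse')
model = AutoModel.from_pretrained('dicta-il/dictabert-large-parse', trust_remote_code=True)
model.eval()

# Hypothetical Hebrew input ("In 1948 the author was born").
sentence = 'בשנת 1948 נולד הסופר'

# predict() is assumed from the README's sample usage: it should return
# one JSON-style dict per input sentence, including the "ner_entities"
# list that the next hunk extends.
with torch.no_grad():
    predictions = model.predict([sentence], tokenizer, output_style='json')
print(predictions[0]['ner_entities'])
```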
@@ -492,6 +492,14 @@ Output:
     ],
     "root_idx": 2,
     "ner_entities": [
+        {
+            "phrase": "1948",
+            "label": "TIMEX",
+            "start": 5,
+            "end": 9,
+            "token_start": 1,
+            "token_end": 1
+        },
         {
             "phrase": "אפרים קישון",
             "label": "PER",
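The added TIMEX record also pins down the offset conventions. Here is a self-contained sketch that checks them, using the record verbatim from the hunk above; the sentence is hypothetical, chosen so that characters 5..9 and word index 1 line up with the record, and the field semantics in the comments are assumptions read off the field names:

```python
# Entity record copied verbatim from the diff above.
entity = {
    "phrase": "1948",
    "label": "TIMEX",
    "start": 5,        # assumed: character offset, inclusive
    "end": 9,          # assumed: character offset, exclusive
    "token_start": 1,  # assumed: word-token index, inclusive
    "token_end": 1,    # assumed: word-token index, inclusive
}

# Hypothetical sentence: 'בשנת' is 4 characters long, so '1948' starts
# at character 5 and is word number 1 (0-based).
sentence = 'בשנת 1948 נולד הסופר'

assert sentence[entity["start"]:entity["end"]] == entity["phrase"]
assert sentence.split()[entity["token_start"]] == entity["phrase"]
```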
@@ -553,7 +561,7 @@ Results:
 
 ## Citation
 
-If you use DictaBERT-parse in your research, please cite ```MRL Parsing without Tears: The Case of Hebrew```
+If you use DictaBERT-large-parse in your research, please cite ```MRL Parsing without Tears: The Case of Hebrew```
 
 **BibTeX:**
 