Update README.md
Add a comment about `max_seq_length` to the Sentence Transformers example, especially for efficient training:
```python
# Optionally, lower the maximum sequence length for lower memory usage
# model.max_seq_length = 8192
```
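For context, here is a minimal sketch of how the new comment slots into the existing Sentence Transformers usage. It assumes the `sentence-transformers` package is installed; the model name and example query come from the surrounding README snippet, and 8192 is the optional lower limit suggested above, not a required setting:

```python
from sentence_transformers import SentenceTransformer

# Load the embedding model as in the README example
model = SentenceTransformer("Qwen/Qwen3-Embedding-8B")

# Optionally, lower the maximum sequence length for lower memory usage
model.max_seq_length = 8192

# Embed the example query from the README
queries = [
    "What is the capital of China?",
]
query_embeddings = model.encode(queries)
print(query_embeddings.shape)
```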
README.md

````diff
@@ -77,6 +77,9 @@ model = SentenceTransformer("Qwen/Qwen3-Embedding-8B")
 #     tokenizer_kwargs={"padding_side": "left"},
 # )
 
+# Optionally, lower the maximum sequence length for lower memory usage
+# model.max_seq_length = 8192
+
 # The queries and documents to embed
 queries = [
     "What is the capital of China?",
@@ -282,4 +285,4 @@ If you find our work helpful, feel free to give us a cite.
   journal={arXiv preprint arXiv:2506.05176},
   year={2025}
 }
-```
+```
````