Update README.md
README.md CHANGED
@@ -332,7 +332,7 @@ Then you can load this model and run inference.
 from sentence_transformers import SentenceTransformer
 
 # Download from the 🤗 Hub
-model = SentenceTransformer("JacobLinCool/Qwen3-Embedding-GIR-1")
+model = SentenceTransformer("JacobLinCool/Qwen3-Embedding-8B-GIR-1")
 # Run inference
 queries = [
     "Generates samples of text from the provided vocabulary.\n\n Args:\n plain_vocab: vocabulary.\n distribution: distribution.\n train_samples: samples for training.\n length: length.\n\n Returns:\n train_indices (np.array of Integers): random integers for training.\n shape = [num_samples, length]\n test_indices (np.array of Integers): random integers for testing.\n shape = [num_samples, length]\n plain_vocab (list of Integers): unique vocabularies.",
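For context, this hunk only shows the first few lines of the card's inference example. A minimal end-to-end sketch with the renamed checkpoint might look like the following; the shortened query, the `documents` list, and the similarity step are illustrative assumptions, not part of the card's original snippet:

```python
from sentence_transformers import SentenceTransformer

# Download the renamed checkpoint from the 🤗 Hub
model = SentenceTransformer("JacobLinCool/Qwen3-Embedding-8B-GIR-1")

# Embed a (shortened) query and a hypothetical code document
queries = [
    "Generates samples of text from the provided vocabulary.",
]
documents = [
    "def generate_plain_samples(plain_vocab, distribution, train_samples, length): ...",
]

query_embeddings = model.encode(queries)
document_embeddings = model.encode(documents)

# Similarity matrix of shape (len(queries), len(documents))
scores = model.similarity(query_embeddings, document_embeddings)
print(scores)
```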
@@ -488,7 +488,7 @@ You can finetune this model on your own dataset.
 - `load_best_model_at_end`: True
 - `optim`: paged_adamw_8bit
 - `push_to_hub`: True
-- `hub_model_id`: JacobLinCool/Qwen3-Embedding-GIR-1
+- `hub_model_id`: JacobLinCool/Qwen3-Embedding-8B-GIR-1
 - `hub_private_repo`: False
 - `gradient_checkpointing`: True
 - `eval_on_start`: True
@@ -578,7 +578,7 @@ You can finetune this model on your own dataset.
 - `use_legacy_prediction_loop`: False
 - `push_to_hub`: True
 - `resume_from_checkpoint`: None
-- `hub_model_id`: JacobLinCool/Qwen3-Embedding-GIR-1
+- `hub_model_id`: JacobLinCool/Qwen3-Embedding-8B-GIR-1
 - `hub_strategy`: every_save
 - `hub_private_repo`: False
 - `hub_always_push`: False
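The second and third hunks only rename the `hub_model_id` entry in the card's training-hyperparameter list. As a rough sketch of how the flags shown in those hunks fit together (assuming they originate from `SentenceTransformerTrainingArguments`; the `output_dir` value is a made-up placeholder and flags not shown in the diff are left at their defaults):

```python
from sentence_transformers import SentenceTransformerTrainingArguments

args = SentenceTransformerTrainingArguments(
    output_dir="outputs/Qwen3-Embedding-8B-GIR-1",  # placeholder, not from the card
    # Optimization / evaluation flags shown in the diff context
    optim="paged_adamw_8bit",
    gradient_checkpointing=True,
    load_best_model_at_end=True,
    eval_on_start=True,
    # Hub settings, with the renamed repository id
    push_to_hub=True,
    hub_model_id="JacobLinCool/Qwen3-Embedding-8B-GIR-1",
    hub_strategy="every_save",
    hub_private_repo=False,
    hub_always_push=False,
)
```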