---
base_model: intfloat/multilingual-e5-small
datasets: []
language: []
library_name: sentence-transformers
metrics:
- cosine_accuracy
- cosine_accuracy_threshold
- cosine_f1
- cosine_f1_threshold
- cosine_precision
- cosine_recall
- cosine_ap
- dot_accuracy
- dot_accuracy_threshold
- dot_f1
- dot_f1_threshold
- dot_precision
- dot_recall
- dot_ap
- manhattan_accuracy
- manhattan_accuracy_threshold
- manhattan_f1
- manhattan_f1_threshold
- manhattan_precision
- manhattan_recall
- manhattan_ap
- euclidean_accuracy
- euclidean_accuracy_threshold
- euclidean_f1
- euclidean_f1_threshold
- euclidean_precision
- euclidean_recall
- euclidean_ap
- max_accuracy
- max_accuracy_threshold
- max_f1
- max_f1_threshold
- max_precision
- max_recall
- max_ap
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:971
- loss:OnlineContrastiveLoss
widget:
- source_sentence: Steps to bake a pie
sentences:
- How to bake a pie?
- What are the ingredients of a pizza?
- How to create a business plan?
- source_sentence: What are the benefits of yoga?
sentences:
- If I combine the yellow and blue colors, what color will I get?
- Can you help me understand this contract?
- What are the benefits of meditation?
- source_sentence: Capital city of Canada
sentences:
- What time does the movie start?
- Who is the President of the United States?
- What is the capital of Canada?
- source_sentence: Tell me about Shopify
sentences:
- Who discovered penicillin?
- Share info about Shopify
- Who invented the telephone?
- source_sentence: What is the melting point of ice at sea level?
sentences:
- What is the boiling point of water at sea level?
- Can you recommend a good restaurant nearby?
- Tell me a joke
model-index:
- name: SentenceTransformer based on intfloat/multilingual-e5-small
results:
- task:
type: binary-classification
name: Binary Classification
dataset:
name: pair class dev
type: pair-class-dev
metrics:
- type: cosine_accuracy
value: 0.9300411522633745
name: Cosine Accuracy
- type: cosine_accuracy_threshold
value: 0.788658857345581
name: Cosine Accuracy Threshold
- type: cosine_f1
value: 0.9237668161434978
name: Cosine F1
- type: cosine_f1_threshold
value: 0.7819762825965881
name: Cosine F1 Threshold
- type: cosine_precision
value: 0.8956521739130435
name: Cosine Precision
- type: cosine_recall
value: 0.9537037037037037
name: Cosine Recall
- type: cosine_ap
value: 0.9603135110633257
name: Cosine Ap
- type: dot_accuracy
value: 0.9300411522633745
name: Dot Accuracy
- type: dot_accuracy_threshold
value: 0.788658857345581
name: Dot Accuracy Threshold
- type: dot_f1
value: 0.9237668161434978
name: Dot F1
- type: dot_f1_threshold
value: 0.7819762229919434
name: Dot F1 Threshold
- type: dot_precision
value: 0.8956521739130435
name: Dot Precision
- type: dot_recall
value: 0.9537037037037037
name: Dot Recall
- type: dot_ap
value: 0.9603135110633257
name: Dot Ap
- type: manhattan_accuracy
value: 0.9218106995884774
name: Manhattan Accuracy
- type: manhattan_accuracy_threshold
value: 9.936657905578613
name: Manhattan Accuracy Threshold
- type: manhattan_f1
value: 0.914798206278027
name: Manhattan F1
- type: manhattan_f1_threshold
value: 10.316186904907227
name: Manhattan F1 Threshold
- type: manhattan_precision
value: 0.8869565217391304
name: Manhattan Precision
- type: manhattan_recall
value: 0.9444444444444444
name: Manhattan Recall
- type: manhattan_ap
value: 0.9578931449470002
name: Manhattan Ap
- type: euclidean_accuracy
value: 0.9300411522633745
name: Euclidean Accuracy
- type: euclidean_accuracy_threshold
value: 0.6501401662826538
name: Euclidean Accuracy Threshold
- type: euclidean_f1
value: 0.9237668161434978
name: Euclidean F1
- type: euclidean_f1_threshold
value: 0.6603381633758545
name: Euclidean F1 Threshold
- type: euclidean_precision
value: 0.8956521739130435
name: Euclidean Precision
- type: euclidean_recall
value: 0.9537037037037037
name: Euclidean Recall
- type: euclidean_ap
value: 0.9603135110633257
name: Euclidean Ap
- type: max_accuracy
value: 0.9300411522633745
name: Max Accuracy
- type: max_accuracy_threshold
value: 9.936657905578613
name: Max Accuracy Threshold
- type: max_f1
value: 0.9237668161434978
name: Max F1
- type: max_f1_threshold
value: 10.316186904907227
name: Max F1 Threshold
- type: max_precision
value: 0.8956521739130435
name: Max Precision
- type: max_recall
value: 0.9537037037037037
name: Max Recall
- type: max_ap
value: 0.9603135110633257
name: Max Ap
- task:
type: binary-classification
name: Binary Classification
dataset:
name: pair class test
type: pair-class-test
metrics:
- type: cosine_accuracy
value: 0.9300411522633745
name: Cosine Accuracy
- type: cosine_accuracy_threshold
value: 0.788658857345581
name: Cosine Accuracy Threshold
- type: cosine_f1
value: 0.9237668161434978
name: Cosine F1
- type: cosine_f1_threshold
value: 0.7819762825965881
name: Cosine F1 Threshold
- type: cosine_precision
value: 0.8956521739130435
name: Cosine Precision
- type: cosine_recall
value: 0.9537037037037037
name: Cosine Recall
- type: cosine_ap
value: 0.9603135110633257
name: Cosine Ap
- type: dot_accuracy
value: 0.9300411522633745
name: Dot Accuracy
- type: dot_accuracy_threshold
value: 0.788658857345581
name: Dot Accuracy Threshold
- type: dot_f1
value: 0.9237668161434978
name: Dot F1
- type: dot_f1_threshold
value: 0.7819762229919434
name: Dot F1 Threshold
- type: dot_precision
value: 0.8956521739130435
name: Dot Precision
- type: dot_recall
value: 0.9537037037037037
name: Dot Recall
- type: dot_ap
value: 0.9603135110633257
name: Dot Ap
- type: manhattan_accuracy
value: 0.9218106995884774
name: Manhattan Accuracy
- type: manhattan_accuracy_threshold
value: 9.936657905578613
name: Manhattan Accuracy Threshold
- type: manhattan_f1
value: 0.914798206278027
name: Manhattan F1
- type: manhattan_f1_threshold
value: 10.316186904907227
name: Manhattan F1 Threshold
- type: manhattan_precision
value: 0.8869565217391304
name: Manhattan Precision
- type: manhattan_recall
value: 0.9444444444444444
name: Manhattan Recall
- type: manhattan_ap
value: 0.9578931449470002
name: Manhattan Ap
- type: euclidean_accuracy
value: 0.9300411522633745
name: Euclidean Accuracy
- type: euclidean_accuracy_threshold
value: 0.6501401662826538
name: Euclidean Accuracy Threshold
- type: euclidean_f1
value: 0.9237668161434978
name: Euclidean F1
- type: euclidean_f1_threshold
value: 0.6603381633758545
name: Euclidean F1 Threshold
- type: euclidean_precision
value: 0.8956521739130435
name: Euclidean Precision
- type: euclidean_recall
value: 0.9537037037037037
name: Euclidean Recall
- type: euclidean_ap
value: 0.9603135110633257
name: Euclidean Ap
- type: max_accuracy
value: 0.9300411522633745
name: Max Accuracy
- type: max_accuracy_threshold
value: 9.936657905578613
name: Max Accuracy Threshold
- type: max_f1
value: 0.9237668161434978
name: Max F1
- type: max_f1_threshold
value: 10.316186904907227
name: Max F1 Threshold
- type: max_precision
value: 0.8956521739130435
name: Max Precision
- type: max_recall
value: 0.9537037037037037
name: Max Recall
- type: max_ap
value: 0.9603135110633257
name: Max Ap
---

# SentenceTransformer based on intfloat/multilingual-e5-small
This is a sentence-transformers model finetuned from intfloat/multilingual-e5-small. It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
## Model Details

### Model Description
- Model Type: Sentence Transformer
- Base model: intfloat/multilingual-e5-small
- Maximum Sequence Length: 512 tokens
- Output Dimensionality: 384 dimensions
- Similarity Function: Cosine Similarity
### Model Sources
- Documentation: [Sentence Transformers Documentation](https://sbert.net)
- Repository: [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- Hugging Face: [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Normalize()
)
```
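If you want to sanity-check these properties programmatically, a minimal sketch (assuming the Hub repository id shown in the Usage section below) is:

```python
from sentence_transformers import SentenceTransformer

# Repository id taken from the Usage section below; swap in a local path if needed
model = SentenceTransformer("srikarvar/multilingual-e5-small-pairclass-4")

print(model.max_seq_length)                      # 512
print(model.get_sentence_embedding_dimension())  # 384
print(model)                                     # prints the module stack shown above
```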
## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("srikarvar/multilingual-e5-small-pairclass-4")
# Run inference
sentences = [
    'What is the melting point of ice at sea level?',
    'What is the boiling point of water at sea level?',
    'Can you recommend a good restaurant nearby?',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 384]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
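Because the model was trained for pair classification, a natural follow-up is to threshold the cosine similarity between two texts. The sketch below reuses the `model` loaded above; the helper name and the 0.789 cutoff (taken from the dev-set cosine accuracy threshold reported under Evaluation) are illustrative choices, not part of the model itself.

```python
def is_duplicate(model, text_a: str, text_b: str, threshold: float = 0.789) -> bool:
    """Predict whether two texts are semantically equivalent."""
    embeddings = model.encode([text_a, text_b])
    score = model.similarity(embeddings[0:1], embeddings[1:2]).item()  # cosine similarity
    return score >= threshold

print(is_duplicate(model, "Steps to bake a pie", "How to bake a pie?"))              # expected: True
print(is_duplicate(model, "Steps to bake a pie", "How to create a business plan?"))  # expected: False
```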
## Evaluation

### Metrics

#### Binary Classification

- Dataset: `pair-class-dev`
- Evaluated with `BinaryClassificationEvaluator`
| Metric | Value |
|---|---|
| cosine_accuracy | 0.93 |
| cosine_accuracy_threshold | 0.7887 |
| cosine_f1 | 0.9238 |
| cosine_f1_threshold | 0.782 |
| cosine_precision | 0.8957 |
| cosine_recall | 0.9537 |
| cosine_ap | 0.9603 |
| dot_accuracy | 0.93 |
| dot_accuracy_threshold | 0.7887 |
| dot_f1 | 0.9238 |
| dot_f1_threshold | 0.782 |
| dot_precision | 0.8957 |
| dot_recall | 0.9537 |
| dot_ap | 0.9603 |
| manhattan_accuracy | 0.9218 |
| manhattan_accuracy_threshold | 9.9367 |
| manhattan_f1 | 0.9148 |
| manhattan_f1_threshold | 10.3162 |
| manhattan_precision | 0.887 |
| manhattan_recall | 0.9444 |
| manhattan_ap | 0.9579 |
| euclidean_accuracy | 0.93 |
| euclidean_accuracy_threshold | 0.6501 |
| euclidean_f1 | 0.9238 |
| euclidean_f1_threshold | 0.6603 |
| euclidean_precision | 0.8957 |
| euclidean_recall | 0.9537 |
| euclidean_ap | 0.9603 |
| max_accuracy | 0.93 |
| max_accuracy_threshold | 9.9367 |
| max_f1 | 0.9238 |
| max_f1_threshold | 10.3162 |
| max_precision | 0.8957 |
| max_recall | 0.9537 |
| max_ap | 0.9603 |
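The table above is produced by `BinaryClassificationEvaluator`. To run the same kind of evaluation on your own labeled pairs, a rough sketch is below; the sentence pairs are placeholders, since the actual dev split is not published with this card.

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import BinaryClassificationEvaluator

model = SentenceTransformer("srikarvar/multilingual-e5-small-pairclass-4")

# Placeholder pairs: label 1 = semantically equivalent, 0 = not
sentences1 = ["Steps to bake a pie", "Capital city of Canada"]
sentences2 = ["How to bake a pie?", "Who is the President of the United States?"]
labels = [1, 0]

dev_evaluator = BinaryClassificationEvaluator(sentences1, sentences2, labels, name="pair-class-dev")
results = dev_evaluator(model)  # in sentence-transformers 3.x this returns a dict of metrics like those above
print(results)
```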
#### Binary Classification

- Dataset: `pair-class-test`
- Evaluated with `BinaryClassificationEvaluator`
| Metric | Value |
|---|---|
| cosine_accuracy | 0.93 |
| cosine_accuracy_threshold | 0.7887 |
| cosine_f1 | 0.9238 |
| cosine_f1_threshold | 0.782 |
| cosine_precision | 0.8957 |
| cosine_recall | 0.9537 |
| cosine_ap | 0.9603 |
| dot_accuracy | 0.93 |
| dot_accuracy_threshold | 0.7887 |
| dot_f1 | 0.9238 |
| dot_f1_threshold | 0.782 |
| dot_precision | 0.8957 |
| dot_recall | 0.9537 |
| dot_ap | 0.9603 |
| manhattan_accuracy | 0.9218 |
| manhattan_accuracy_threshold | 9.9367 |
| manhattan_f1 | 0.9148 |
| manhattan_f1_threshold | 10.3162 |
| manhattan_precision | 0.887 |
| manhattan_recall | 0.9444 |
| manhattan_ap | 0.9579 |
| euclidean_accuracy | 0.93 |
| euclidean_accuracy_threshold | 0.6501 |
| euclidean_f1 | 0.9238 |
| euclidean_f1_threshold | 0.6603 |
| euclidean_precision | 0.8957 |
| euclidean_recall | 0.9537 |
| euclidean_ap | 0.9603 |
| max_accuracy | 0.93 |
| max_accuracy_threshold | 9.9367 |
| max_f1 | 0.9238 |
| max_f1_threshold | 10.3162 |
| max_precision | 0.8957 |
| max_recall | 0.9537 |
| max_ap | 0.9603 |
## Training Details

### Training Dataset

#### Unnamed Dataset

- Size: 971 training samples
- Columns: `sentence2`, `sentence1`, and `label`
- Approximate statistics based on the first 1000 samples:
  |         | sentence2                                         | sentence1                                         | label                  |
  |:--------|:--------------------------------------------------|:--------------------------------------------------|:-----------------------|
  | type    | string                                            | string                                            | int                    |
  | details | min: 4 tokens, mean: 10.12 tokens, max: 22 tokens | min: 6 tokens, mean: 10.82 tokens, max: 22 tokens | 0: ~48.61%, 1: ~51.39% |
- Samples:
  | sentence2                                    | sentence1                                  | label |
  |:---------------------------------------------|:-------------------------------------------|:------|
  | Total number of bones in an adult human body | How many bones are in the human body?      | 1     |
  | What is the largest river in North America?  | What is the largest lake in North America? | 0     |
  | What is the capital of Australia?            | What is the capital of New Zealand?        | 0     |
- Loss: `OnlineContrastiveLoss`
### Evaluation Dataset

#### Unnamed Dataset

- Size: 243 evaluation samples
- Columns: `sentence2`, `sentence1`, and `label`
- Approximate statistics based on the first 1000 samples:
  |         | sentence2                                         | sentence1                                         | label                  |
  |:--------|:--------------------------------------------------|:--------------------------------------------------|:-----------------------|
  | type    | string                                            | string                                            | int                    |
  | details | min: 4 tokens, mean: 10.09 tokens, max: 20 tokens | min: 6 tokens, mean: 10.55 tokens, max: 22 tokens | 0: ~55.56%, 1: ~44.44% |
- Samples:
  | sentence2                                       | sentence1                                          | label |
  |:------------------------------------------------|:---------------------------------------------------|:------|
  | What are the various forms of renewable energy? | What are the different types of renewable energy?  | 1     |
  | Gravity discoverer                              | Who discovered gravity?                             | 1     |
  | Can you help me write this report?              | Can you help me understand this report?             | 0     |
- Loss: `OnlineContrastiveLoss`
### Training Hyperparameters

#### Non-Default Hyperparameters

- `eval_strategy`: epoch
- `per_device_train_batch_size`: 32
- `per_device_eval_batch_size`: 32
- `gradient_accumulation_steps`: 2
- `learning_rate`: 3e-06
- `weight_decay`: 0.01
- `num_train_epochs`: 15
- `lr_scheduler_type`: reduce_lr_on_plateau
- `warmup_ratio`: 0.1
- `load_best_model_at_end`: True
- `optim`: adamw_torch_fused
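For readers who want to reproduce a comparable run, the sketch below maps these non-default values onto the sentence-transformers v3 training API. It is an illustrative outline only: the dataset contents, the output path, and `save_strategy="epoch"` are assumptions, not details published with this card.

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import OnlineContrastiveLoss

model = SentenceTransformer("intfloat/multilingual-e5-small")

# Hypothetical pair-classification data using the column layout from this card
# (label 1 = semantically equivalent, 0 = not)
train_dataset = Dataset.from_dict({
    "sentence2": ["Steps to bake a pie", "Capital city of Canada"],
    "sentence1": ["How to bake a pie?", "What time does the movie start?"],
    "label": [1, 0],
})
eval_dataset = train_dataset  # placeholder; the real run used a held-out 243-pair split

args = SentenceTransformerTrainingArguments(
    output_dir="multilingual-e5-small-pairclass",  # assumed output path
    eval_strategy="epoch",
    save_strategy="epoch",  # must match eval_strategy when load_best_model_at_end=True
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    gradient_accumulation_steps=2,
    learning_rate=3e-6,
    weight_decay=0.01,
    num_train_epochs=15,
    lr_scheduler_type="reduce_lr_on_plateau",
    warmup_ratio=0.1,
    load_best_model_at_end=True,
    optim="adamw_torch_fused",  # requires a CUDA device
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    loss=OnlineContrastiveLoss(model),
)
trainer.train()
```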
#### All Hyperparameters

<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: epoch
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 32
- `per_device_eval_batch_size`: 32
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 2
- `eval_accumulation_steps`: None
- `learning_rate`: 3e-06
- `weight_decay`: 0.01
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 15
- `max_steps`: -1
- `lr_scheduler_type`: reduce_lr_on_plateau
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.1
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: True
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch_fused
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: proportional

</details>
### Training Logs

| Epoch | Step | Training Loss | Validation Loss | pair-class-dev_max_ap | pair-class-test_max_ap |
|:-----:|:----:|:-------------:|:---------------:|:---------------------:|:----------------------:|
| 0 | 0 | - | - | 0.6426 | - |
| 0.6452 | 10 | 4.7075 | - | - | - |
| 0.9677 | 15 | - | 3.1481 | 0.7843 | - |
| 1.2903 | 20 | 3.431 | - | - | - |
| 1.9355 | 30 | 3.4054 | - | - | - |
| 2.0 | 31 | - | 2.1820 | 0.8692 | - |
| 2.5806 | 40 | 2.2735 | - | - | - |
| 2.9677 | 46 | - | 1.8185 | 0.9078 | - |
| 3.2258 | 50 | 2.3159 | - | - | - |
| 3.8710 | 60 | 2.1466 | - | - | - |
| 4.0 | 62 | - | 1.5769 | 0.9252 | - |
| 4.5161 | 70 | 1.6873 | - | - | - |
| 4.9677 | 77 | - | 1.4342 | 0.9310 | - |
| 5.1613 | 80 | 1.5927 | - | - | - |
| 5.8065 | 90 | 1.4184 | - | - | - |
| 6.0 | 93 | - | 1.3544 | 0.9357 | - |
| 6.4516 | 100 | 1.333 | - | - | - |
| 6.9677 | 108 | - | 1.2630 | 0.9402 | - |
| 7.0968 | 110 | 1.089 | - | - | - |
| 7.7419 | 120 | 1.0947 | - | - | - |
| 8.0 | 124 | - | 1.2120 | 0.9444 | - |
| 8.3871 | 130 | 0.8118 | - | - | - |
| 8.9677 | 139 | - | 1.1641 | 0.9454 | - |
| 9.0323 | 140 | 1.0237 | - | - | - |
| 9.6774 | 150 | 0.8406 | - | - | - |
| 10.0 | 155 | - | 1.0481 | 0.9464 | - |
| 10.3226 | 160 | 0.7081 | - | - | - |
| 10.9677 | 170 | 0.7397 | 0.9324 | 0.9509 | - |
| 11.6129 | 180 | 0.5604 | - | - | - |
| 12.0 | 186 | - | 0.8386 | 0.9556 | - |
| 12.2581 | 190 | 0.5841 | - | - | - |
| 12.9032 | 200 | 0.5463 | - | - | - |
| 12.9677 | 201 | - | 0.7930 | 0.9577 | - |
| 13.5484 | 210 | 0.4599 | - | - | - |
| 14.0 | 217 | - | 0.7564 | 0.9599 | - |
| 14.1935 | 220 | 0.2437 | - | - | - |
| **14.5161** | **225** | **-** | **0.7522** | **0.9603** | **0.9603** |

- The bold row denotes the saved checkpoint.
### Framework Versions
- Python: 3.10.12
- Sentence Transformers: 3.0.1
- Transformers: 4.41.2
- PyTorch: 2.1.2+cu121
- Accelerate: 0.32.1
- Datasets: 2.19.1
- Tokenizers: 0.19.1
## Citation

### BibTeX

#### Sentence Transformers

```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```