fats-fme committed commit 3c29ea8 (verified) · Parent(s): 9b164bc

Training in progress, step 89, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:54b580ddbf09d3d9ae2addf973352cb11707da67742b7b282fde53fde7fbe740
+oid sha256:981264992e2426ae6dd9cd6ab39604f29b4c1b06b3272034c116255bee16bac0
 size 45118424
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf2da229f251b5f291f941c33cc14eed837857befbbc6c2ca7bccca1a3f0efe5
+oid sha256:ac2b19e2861b1f82518ea74977bab48b2e3e74a4053627d0cb602fc60631b3de
 size 90365754
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:265ac61368277eb3ace99e1735a0a601a5a63864d2a441bc9b97a9c99e0f4c89
+oid sha256:0fd574bca472a6ebfd89e5aa3033f856f2e017ccd1e7f19de76f7298f30a795b
 size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c79d137864c2e1cdce26d7c2feb8d8fbfd9200a245e19ae914aa4b88e97a6687
+oid sha256:78800c5f3e2fa2f4eb40a71d4e5f5b72a3535de84eba9d079818145d24d3aa25
 size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a045dc546280f3d42f2dc4c02c23ea3301726496c8a8623025df0283d2e3d076
+oid sha256:1200f1351667966f59f2212d75c031c1e7aaf68c3827c1ca68846af01b4ad8ef
 size 1064
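The checkpoint binaries above are tracked with Git LFS, so each diff only touches the pointer file: a `version` line, the `oid sha256:` of the blob, and its `size` in bytes. A minimal Python sketch for checking a downloaded binary against the hash recorded in its pointer; the paths are hypothetical placeholders, not files guaranteed to exist locally:

```python
import hashlib

# Hypothetical local paths; point these at the actual pointer text and binary.
POINTER_PATH = "last-checkpoint/adapter_model.safetensors"  # LFS pointer file
BINARY_PATH = "adapter_model.safetensors"                    # downloaded binary

def read_pointer(path):
    """Parse an LFS pointer file into a dict of its space-separated key/value lines."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def sha256_of(path, chunk_size=1 << 20):
    """Stream the binary and compute its sha256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

pointer = read_pointer(POINTER_PATH)
expected = pointer["oid"].removeprefix("sha256:")
actual = sha256_of(BINARY_PATH)
print("match" if actual == expected else f"mismatch: {actual} != {expected}")
```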
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.7677329624478443,
+  "epoch": 0.9902642559109874,
   "eval_steps": 23,
-  "global_step": 69,
+  "global_step": 89,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -522,6 +522,146 @@
       "eval_samples_per_second": 17.482,
       "eval_steps_per_second": 4.37,
       "step": 69
+    },
+    {
+      "epoch": 0.7788595271210014,
+      "grad_norm": 0.2063526213169098,
+      "learning_rate": 9.597340598905852e-05,
+      "loss": 0.7032,
+      "step": 70
+    },
+    {
+      "epoch": 0.7899860917941586,
+      "grad_norm": 0.23765341937541962,
+      "learning_rate": 8.79463319744677e-05,
+      "loss": 0.7518,
+      "step": 71
+    },
+    {
+      "epoch": 0.8011126564673157,
+      "grad_norm": 0.26653748750686646,
+      "learning_rate": 7.999743062239557e-05,
+      "loss": 0.8082,
+      "step": 72
+    },
+    {
+      "epoch": 0.8122392211404729,
+      "grad_norm": 0.2505719065666199,
+      "learning_rate": 7.217825360835473e-05,
+      "loss": 0.8035,
+      "step": 73
+    },
+    {
+      "epoch": 0.8233657858136301,
+      "grad_norm": 0.25650104880332947,
+      "learning_rate": 6.453951129574644e-05,
+      "loss": 0.8025,
+      "step": 74
+    },
+    {
+      "epoch": 0.8344923504867872,
+      "grad_norm": 0.270044207572937,
+      "learning_rate": 5.713074385969457e-05,
+      "loss": 0.8659,
+      "step": 75
+    },
+    {
+      "epoch": 0.8456189151599444,
+      "grad_norm": 0.2685801684856415,
+      "learning_rate": 5.000000000000002e-05,
+      "loss": 0.8519,
+      "step": 76
+    },
+    {
+      "epoch": 0.8567454798331016,
+      "grad_norm": 0.2514025568962097,
+      "learning_rate": 4.3193525326884435e-05,
+      "loss": 0.8471,
+      "step": 77
+    },
+    {
+      "epoch": 0.8678720445062587,
+      "grad_norm": 0.2761232852935791,
+      "learning_rate": 3.675546244046228e-05,
+      "loss": 0.8711,
+      "step": 78
+    },
+    {
+      "epoch": 0.8789986091794159,
+      "grad_norm": 0.28267714381217957,
+      "learning_rate": 3.072756464904006e-05,
+      "loss": 0.8459,
+      "step": 79
+    },
+    {
+      "epoch": 0.8901251738525731,
+      "grad_norm": 0.29201367497444153,
+      "learning_rate": 2.514892518288988e-05,
+      "loss": 0.8672,
+      "step": 80
+    },
+    {
+      "epoch": 0.9012517385257302,
+      "grad_norm": 0.2988654673099518,
+      "learning_rate": 2.0055723659649904e-05,
+      "loss": 0.781,
+      "step": 81
+    },
+    {
+      "epoch": 0.9123783031988874,
+      "grad_norm": 0.3167029619216919,
+      "learning_rate": 1.5480991445620542e-05,
+      "loss": 0.8121,
+      "step": 82
+    },
+    {
+      "epoch": 0.9235048678720446,
+      "grad_norm": 0.3307524621486664,
+      "learning_rate": 1.1454397434679021e-05,
+      "loss": 0.8358,
+      "step": 83
+    },
+    {
+      "epoch": 0.9346314325452016,
+      "grad_norm": 0.35665422677993774,
+      "learning_rate": 8.002055634117578e-06,
+      "loss": 0.8123,
+      "step": 84
+    },
+    {
+      "epoch": 0.9457579972183588,
+      "grad_norm": 0.36604735255241394,
+      "learning_rate": 5.146355805285452e-06,
+      "loss": 0.8791,
+      "step": 85
+    },
+    {
+      "epoch": 0.9568845618915159,
+      "grad_norm": 0.3705534040927887,
+      "learning_rate": 2.905818257394799e-06,
+      "loss": 0.9042,
+      "step": 86
+    },
+    {
+      "epoch": 0.9680111265646731,
+      "grad_norm": 0.4027464687824249,
+      "learning_rate": 1.2949737362087156e-06,
+      "loss": 0.7932,
+      "step": 87
+    },
+    {
+      "epoch": 0.9791376912378303,
+      "grad_norm": 0.505004346370697,
+      "learning_rate": 3.2426918657900704e-07,
+      "loss": 0.8301,
+      "step": 88
+    },
+    {
+      "epoch": 0.9902642559109874,
+      "grad_norm": 0.22385388612747192,
+      "learning_rate": 0.0,
+      "loss": 0.6786,
+      "step": 89
     }
   ],
   "logging_steps": 1,
@@ -536,12 +676,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 5.341828583994163e+16,
+  "total_flos": 6.890184695296819e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null