Training in progress, step 100, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9e235caf1fc39690d2653f8d542d2461306f2064d976c9a8c78a83a5152887be
 size 167832240
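The adapter weights are tracked through Git LFS, so the repository itself only stores a small pointer file: a version line, the oid sha256 of the blob, and its size in bytes. As a rough sanity check after downloading the artifact, a sketch like the following recomputes the digest and compares it to the pointer above (the local path and the helper name are illustrative, not part of the repository):

import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so large checkpoints need not fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Illustrative path: adjust to wherever the checkpoint was downloaded.
artifact = Path("last-checkpoint/adapter_model.safetensors")
expected = "9e235caf1fc39690d2653f8d542d2461306f2064d976c9a8c78a83a5152887be"  # oid from the pointer above

print("size bytes:", artifact.stat().st_size)        # 167832240 per the pointer
print("sha256 ok:", sha256_of(artifact) == expected)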
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a4a6a893c39c365b1881cb949f8c6e9b95be639774db878aef5e1b42503fdc2d
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:508553257e217165a214c0be92b028528283e5b951b31a736b90480478135cb2
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3a60c7d771c1fd156acee762fba03c724cb41829a3f71df370ecd1d20b134982
 size 1064
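The remaining LFS-tracked files hold the state needed to resume the run exactly where it stopped: optimizer.pt (optimizer state, at 335922386 bytes roughly twice the adapter size, consistent with an Adam-style optimizer keeping extra moment tensors per trainable parameter), scheduler.pt (learning-rate scheduler state) and rng_state.pth (random-number-generator state). A minimal inspection sketch, assuming the files have been pulled locally and that they are ordinary torch pickles as the .pt/.pth suffixes suggest:

import torch

# Paths mirror the repository layout; adjust if the checkpoint lives elsewhere.
# Recent torch versions default to weights_only=True and may refuse arbitrary
# pickles, hence the explicit flag here.
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

# The exact contents depend on the Trainer/optimizer versions used; printing the
# top-level structure is a safe way to see what was serialized.
for name, obj in [("optimizer", optimizer_state),
                  ("scheduler", scheduler_state),
                  ("rng_state", rng_state)]:
    summary = list(obj.keys()) if isinstance(obj, dict) else type(obj).__name__
    print(name, "->", summary)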
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.
-  "best_model_checkpoint": "miner_id_24/checkpoint-
-  "epoch": 0.
+  "best_metric": 0.34765490889549255,
+  "best_model_checkpoint": "miner_id_24/checkpoint-100",
+  "epoch": 0.0020115563914689895,
   "eval_steps": 25,
-  "global_step":
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -564,6 +564,189 @@
       "eval_samples_per_second": 2.856,
       "eval_steps_per_second": 1.428,
       "step": 75
+    },
+    {
+      "epoch": 0.001528782857516432,
+      "grad_norm": 0.7186751365661621,
+      "learning_rate": 2.9876321572751144e-05,
+      "loss": 0.1929,
+      "step": 76
+    },
+    {
+      "epoch": 0.0015488984214311217,
+      "grad_norm": 0.8120232224464417,
+      "learning_rate": 2.7557479520891104e-05,
+      "loss": 0.1906,
+      "step": 77
+    },
+    {
+      "epoch": 0.0015690139853458116,
+      "grad_norm": 0.5622069835662842,
+      "learning_rate": 2.5317852301584643e-05,
+      "loss": 0.158,
+      "step": 78
+    },
+    {
+      "epoch": 0.0015891295492605016,
+      "grad_norm": 0.6373627185821533,
+      "learning_rate": 2.315988891431412e-05,
+      "loss": 0.158,
+      "step": 79
+    },
+    {
+      "epoch": 0.0016092451131751915,
+      "grad_norm": 0.3394858241081238,
+      "learning_rate": 2.1085949060360654e-05,
+      "loss": 0.0549,
+      "step": 80
+    },
+    {
+      "epoch": 0.0016293606770898814,
+      "grad_norm": 0.9142265915870667,
+      "learning_rate": 1.9098300562505266e-05,
+      "loss": 0.1498,
+      "step": 81
+    },
+    {
+      "epoch": 0.0016494762410045713,
+      "grad_norm": 0.5488215088844299,
+      "learning_rate": 1.7199116885197995e-05,
+      "loss": 0.086,
+      "step": 82
+    },
+    {
+      "epoch": 0.0016695918049192612,
+      "grad_norm": 0.6231745481491089,
+      "learning_rate": 1.5390474757906446e-05,
+      "loss": 0.1239,
+      "step": 83
+    },
+    {
+      "epoch": 0.0016897073688339512,
+      "grad_norm": 0.7093625664710999,
+      "learning_rate": 1.3674351904242611e-05,
+      "loss": 0.1512,
+      "step": 84
+    },
+    {
+      "epoch": 0.0017098229327486409,
+      "grad_norm": 1.516791820526123,
+      "learning_rate": 1.2052624879351104e-05,
+      "loss": 0.3605,
+      "step": 85
+    },
+    {
+      "epoch": 0.0017299384966633308,
+      "grad_norm": 0.8480135202407837,
+      "learning_rate": 1.0527067017923654e-05,
+      "loss": 0.2031,
+      "step": 86
+    },
+    {
+      "epoch": 0.0017500540605780207,
+      "grad_norm": 0.6130940318107605,
+      "learning_rate": 9.09934649508375e-06,
+      "loss": 0.1745,
+      "step": 87
+    },
+    {
+      "epoch": 0.0017701696244927106,
+      "grad_norm": 1.8014602661132812,
+      "learning_rate": 7.771024502261526e-06,
+      "loss": 0.4315,
+      "step": 88
+    },
+    {
+      "epoch": 0.0017902851884074005,
+      "grad_norm": 0.6862621307373047,
+      "learning_rate": 6.543553540053926e-06,
+      "loss": 0.1008,
+      "step": 89
+    },
+    {
+      "epoch": 0.0018104007523220905,
+      "grad_norm": 0.6760240197181702,
+      "learning_rate": 5.418275829936537e-06,
+      "loss": 0.1387,
+      "step": 90
+    },
+    {
+      "epoch": 0.0018305163162367804,
+      "grad_norm": 2.04972767829895,
+      "learning_rate": 4.3964218465642355e-06,
+      "loss": 0.4774,
+      "step": 91
+    },
+    {
+      "epoch": 0.0018506318801514703,
+      "grad_norm": 3.159485101699829,
+      "learning_rate": 3.4791089722651436e-06,
+      "loss": 0.4218,
+      "step": 92
+    },
+    {
+      "epoch": 0.00187074744406616,
+      "grad_norm": 2.143138885498047,
+      "learning_rate": 2.667340275199426e-06,
+      "loss": 0.556,
+      "step": 93
+    },
+    {
+      "epoch": 0.00189086300798085,
+      "grad_norm": 1.6069244146347046,
+      "learning_rate": 1.9620034125190644e-06,
+      "loss": 0.4657,
+      "step": 94
+    },
+    {
+      "epoch": 0.0019109785718955398,
+      "grad_norm": 1.5652406215667725,
+      "learning_rate": 1.3638696597277679e-06,
+      "loss": 0.4702,
+      "step": 95
+    },
+    {
+      "epoch": 0.0019310941358102298,
+      "grad_norm": 2.650838613510132,
+      "learning_rate": 8.735930673024806e-07,
+      "loss": 0.6213,
+      "step": 96
+    },
+    {
+      "epoch": 0.0019512096997249197,
+      "grad_norm": 0.9253861904144287,
+      "learning_rate": 4.917097454988584e-07,
+      "loss": 0.2009,
+      "step": 97
+    },
+    {
+      "epoch": 0.0019713252636396096,
+      "grad_norm": 2.9420182704925537,
+      "learning_rate": 2.1863727812254653e-07,
+      "loss": 0.6738,
+      "step": 98
+    },
+    {
+      "epoch": 0.0019914408275542995,
+      "grad_norm": 3.0107245445251465,
+      "learning_rate": 5.467426590739511e-08,
+      "loss": 0.8191,
+      "step": 99
+    },
+    {
+      "epoch": 0.0020115563914689895,
+      "grad_norm": 1.9796432256698608,
+      "learning_rate": 0.0,
+      "loss": 0.4038,
+      "step": 100
+    },
+    {
+      "epoch": 0.0020115563914689895,
+      "eval_loss": 0.34765490889549255,
+      "eval_runtime": 7329.2018,
+      "eval_samples_per_second": 2.856,
+      "eval_steps_per_second": 1.428,
+      "step": 100
     }
   ],
   "logging_steps": 1,
@@ -587,12 +770,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop":
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.
+  "total_flos": 1.4874840035386982e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
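The trainer_state.json diff is the human-readable summary of this commit: 25 new optimizer steps (76-100) were logged, the evaluation at step 100 produced eval_loss 0.34765490889549255, which became the new best_metric and best_model_checkpoint, and should_training_stop flipped to true. Because the learning rate had also decayed to 0.0 by step 100, this checkpoint marks the end of the scheduled run rather than a point the run is expected to resume from. A small sketch for pulling those fields back out of a downloaded copy, using only the standard library (the local path is assumed; "log_history" is the standard key the Trainer uses for the per-step records shown above):

import json
from pathlib import Path

state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

print("best_metric:          ", state["best_metric"])            # 0.34765490889549255
print("best_model_checkpoint:", state["best_model_checkpoint"])  # miner_id_24/checkpoint-100
print("global_step:          ", state["global_step"])            # 100

# Last few log_history entries: the final training losses plus the evaluation
# record at step 100 that produced the best metric.
for entry in state["log_history"][-3:]:
    print(entry)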