{
  "best_metric": 0.6283422459893048,
  "best_model_checkpoint": "outputs/indobert-base-p1-reddit-indonesia-sarcastic/checkpoint-618",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 1545,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 9.997532801828659e-06,
      "loss": 0.4385,
      "step": 309
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7980155917788803,
      "eval_f1": 0.56752655538695,
      "eval_loss": 0.425838440656662,
      "eval_precision": 0.6111111111111112,
      "eval_recall": 0.5297450424929179,
      "eval_runtime": 5.5937,
      "eval_samples_per_second": 252.25,
      "eval_steps_per_second": 4.112,
      "step": 309
    },
    {
      "epoch": 2.0,
      "learning_rate": 9.990133642141359e-06,
      "loss": 0.3451,
      "step": 618
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8029766123316796,
      "eval_f1": 0.6283422459893048,
      "eval_loss": 0.4345267117023468,
      "eval_precision": 0.5949367088607594,
      "eval_recall": 0.6657223796033994,
      "eval_runtime": 5.5909,
      "eval_samples_per_second": 252.374,
      "eval_steps_per_second": 4.114,
      "step": 618
    },
    {
      "epoch": 3.0,
      "learning_rate": 9.9778098230154e-06,
      "loss": 0.2404,
      "step": 927
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8015591778880227,
      "eval_f1": 0.5317725752508361,
      "eval_loss": 0.5054421424865723,
      "eval_precision": 0.6489795918367347,
      "eval_recall": 0.45042492917847027,
      "eval_runtime": 5.5745,
      "eval_samples_per_second": 253.117,
      "eval_steps_per_second": 4.126,
      "step": 927
    },
    {
      "epoch": 4.0,
      "learning_rate": 9.960573506572391e-06,
      "loss": 0.1326,
      "step": 1236
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7859673990077959,
      "eval_f1": 0.5451807228915662,
      "eval_loss": 0.7033084034919739,
      "eval_precision": 0.5819935691318328,
      "eval_recall": 0.5127478753541076,
      "eval_runtime": 5.5762,
      "eval_samples_per_second": 253.042,
      "eval_steps_per_second": 4.125,
      "step": 1236
    },
    {
      "epoch": 5.0,
      "learning_rate": 9.938441702975689e-06,
      "loss": 0.0787,
      "step": 1545
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7880935506732814,
      "eval_f1": 0.5335413416536662,
      "eval_loss": 0.9795613884925842,
      "eval_precision": 0.59375,
      "eval_recall": 0.48441926345609065,
      "eval_runtime": 5.5587,
      "eval_samples_per_second": 253.838,
      "eval_steps_per_second": 4.138,
      "step": 1545
    },
    {
      "epoch": 5.0,
      "step": 1545,
      "total_flos": 3249750422515200.0,
      "train_loss": 0.24707949987121383,
      "train_runtime": 629.3062,
      "train_samples_per_second": 1570.142,
      "train_steps_per_second": 49.102
    }
  ],
  "logging_steps": 500,
  "max_steps": 30900,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 100,
  "save_steps": 500,
  "total_flos": 3249750422515200.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}