projectme / training_config.json
{
  "model_name": "meta-llama/Llama-3.2-3B-Instruct",
  "task_type": "CAUSAL_LM",
  "training_parameters": {
    "num_epochs": 1,
    "batch_size": 8,
    "learning_rate": 0.0002,
    "max_length": 1024,
    "num_warmup_steps": 128
  },
  "lora_config": {
    "r": 16,
    "lora_alpha": 32,
    "lora_dropout": 0.1,
    "bias": "none",
    "target_modules": [
      "q_proj",
      "k_proj",
      "v_proj",
      "o_proj",
      "gate_proj",
      "up_proj"
    ]
  },
  "training_results": {
    "final_training_loss": 2.3811,
    "final_validation_loss": 2.2514,
    "final_training_perplexity": 10.82,
    "final_validation_perplexity": 9.5,
    "total_steps": 8012,
    "training_time_hours": 8.1
  },
  "hardware": {
    "platform": "TPU v3-8",
    "framework": "PyTorch + torch_xla",
    "environment": "Kaggle"
  }
}
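
The repo does not show how this file is consumed. Below is a minimal Python sketch of one plausible reading: the lora_config block maps directly onto peft.LoraConfig, and training_parameters feeds an AdamW optimizer with a linear warmup schedule. The file-loading code, the variable names, and the choice of AdamW are assumptions; only the hyperparameter values come from the JSON above.

# A minimal sketch, assuming the config drives a PEFT LoRA fine-tune.
# Only the values come from training_config.json; everything else
# (loader, optimizer choice, names) is illustrative.
import json

import torch
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, get_linear_schedule_with_warmup

with open("training_config.json") as f:
    cfg = json.load(f)

# Base model named in the config (a gated Hub repo; access must be granted).
model = AutoModelForCausalLM.from_pretrained(cfg["model_name"])

# The lora_config block maps one-to-one onto peft.LoraConfig fields.
lora = LoraConfig(
    task_type=cfg["task_type"],
    r=cfg["lora_config"]["r"],
    lora_alpha=cfg["lora_config"]["lora_alpha"],
    lora_dropout=cfg["lora_config"]["lora_dropout"],
    bias=cfg["lora_config"]["bias"],
    target_modules=cfg["lora_config"]["target_modules"],
)
model = get_peft_model(model, lora)

# Optimizer and warmup schedule from training_parameters; the total step
# count is taken from the recorded training_results.
tp = cfg["training_parameters"]
optimizer = torch.optim.AdamW(model.parameters(), lr=tp["learning_rate"])
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=tp["num_warmup_steps"],
    num_training_steps=cfg["training_results"]["total_steps"],
)

Note that target_modules covers the four attention projections plus gate_proj and up_proj but not down_proj, so the MLP down projection stays frozen under this config.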
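
The reported perplexities are consistent with being the exponential of the reported cross-entropy losses. A quick check (that exp(loss) is how they were derived is an assumption):

# exp(loss) reproduces the reported perplexities at the given precision.
import math

print(round(math.exp(2.3811), 2))  # 10.82 == final_training_perplexity
print(round(math.exp(2.2514), 2))  # 9.5  == final_validation_perplexity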
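
The hardware block points at single-host TPU training through torch_xla on Kaggle. A minimal sketch of that device setup, assuming the standard xla_model API (the config records the platform but not the launch code):

# Device placement for a TPU core via torch_xla (assumed setup).
import torch_xla.core.xla_model as xm

device = xm.xla_device()   # one core of the TPU v3-8
model = model.to(device)

# In the training loop, xm.optimizer_step(optimizer) replaces
# optimizer.step() so XLA can materialize the step's graph.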