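# YAML configuration for the wiki1m run, split into DATA / MODEL / TRAINING blocks.
# A minimal sketch of loading it, assuming the training script consumes the three
# blocks as plain dictionaries (the script and file name below are assumptions,
# not stated in this config):
#   import yaml
#   with open("wiki1m.yml") as f:        # hypothetical file name
#       cfg = yaml.safe_load(f)
#   data_cfg, model_cfg, train_cfg = cfg["DATA"], cfg["MODEL"], cfg["TRAINING"]
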
DATA:
  task_name: wiki1m
  dataset_name: wiki1m
  dataset_config_name: null
  max_seq_length: 512
  overwrite_output_dir: false # Set to false, if using one_hour_job
  overwrite_cache: false
  pad_to_max_length: true
  load_from_local: true
  max_train_samples: null
  max_eval_samples: null
  max_predict_samples: null
  train_file: datasets/Wiki1M/train.jsonl
  # validation_file: datasets/Wiki1M/test_unseen.jsonl
  # test_file: datasets/Wiki1M/test_unseen.jsonl
  validation_file: datasets/Wiki1M/test.jsonl
  test_file: datasets/Wiki1M/test.jsonl
  label_max_seq_length: 128 # For names baseline
  descriptions_file: datasets/Wiki1M/wiki_all_final_descs_fixed_dedup.json
  all_labels: datasets/Wiki1M/all_labels.txt
  test_labels: datasets/Wiki1M/unseen_labels.txt
  large_dset: true
  tokenized_descs_file: datasets/Wiki1M/tokenized_ner_descs_final_fixed_dedup128.npy
  train_tfidf_short: datasets/Wiki1M/train_shortlists_4K_1000.h5
  # test_tfidf_short: datasets/Wiki1M/test_unseen_shortlists_4K.h5
  # test_tfidf_short: datasets/Wiki1M/test_unseen_shortlists_4K_fixed.h5
  test_tfidf_short: datasets/Wiki1M/test_shortlists_4K_1000_bak.h5
  tok_format: 1
  # max_descs_per_label: 5
  # contrastive_learning_samples: 750
  # cl_min_positive_descs: 1
  # bm_short_file: datasets/eurlex4.3k/train_bmshort.txt
  # ignore_pos_labels_file: datasets/Amzn13K/ignore_train_split6500_fs5.txt
  coil_cluster_mapping_path: bert_coil_map_dict_lemma255K_isotropic.json
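
  # The train/validation/test files above are JSON Lines (one example per line);
  # the .h5 shortlists and .npy tokenized descriptions are precomputed artifacts.
  # A minimal sketch of inspecting them (field names and internal layout are not
  # specified in this file):
  #   import json, h5py, numpy as np
  #   with open("datasets/Wiki1M/train.jsonl") as f:
  #       example = json.loads(next(f))
  #   shortlists = h5py.File("datasets/Wiki1M/train_shortlists_4K_1000.h5", "r")
  #   descs = np.load("datasets/Wiki1M/tokenized_ner_descs_final_fixed_dedup128.npy", allow_pickle=True)
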
MODEL:
  model_name_or_path: bert-base-uncased
  # pretrained_model_path: final_wiki_descs_short500/checkpoint-16000/pytorch_model.bin
  config_name: null
  tokenizer_name: null
  cache_dir: null
  use_fast_tokenizer: true
  model_revision: main
  use_auth_token: false
  ignore_mismatched_sizes: false
  negative_sampling: "none"
  semsup: true
  # label_model_name_or_path: bert-base-uncased # prajjwal1/bert-small
  label_model_name_or_path: prajjwal1/bert-small
  encoder_model_type: bert
  use_custom_optimizer: adamw
  output_learning_rate: 1.e-4
  arch_type: 2
  add_label_name: false
  normalize_embeddings: false
  tie_weights: false
  coil: true
  colbert: false
  # use_precomputed_embeddings: datasets/eurlex4.3k/heir_withdescriptions_4.3k_v1_embs_bert_9_128.npy
  token_dim: 16
  label_frozen_layers: 2
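
  # Both encoders named above are standard Hugging Face checkpoints; a minimal
  # sketch of loading them directly with transformers (the repo's own wrappers,
  # COIL head, and layer freezing are not reproduced here):
  #   from transformers import AutoTokenizer, AutoModel
  #   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)
  #   input_encoder = AutoModel.from_pretrained("bert-base-uncased", revision="main")
  #   label_encoder = AutoModel.from_pretrained("prajjwal1/bert-small")
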
TRAINING:
  do_train: true
  do_eval: true
  do_predict: false
  per_device_train_batch_size: 2
  gradient_accumulation_steps: 4
  per_device_eval_batch_size: 2
  learning_rate: 5.e-5 # Will point to input encoder lr, if use_custom_optimizer is false
  num_train_epochs: 3
  save_steps: 5000
  evaluation_strategy: steps
  eval_steps: 5000
  fp16: true
  fp16_opt_level: O1
  lr_scheduler_type: "linear" # defaults to 'linear'
  dataloader_num_workers: 16
  label_names: [labels]
  scenario: "unseen_labels"
  ddp_find_unused_parameters: false
  max_eval_samples: 50000
  ignore_data_skip: true
  # one_hour_job: true
  random_sample_seed: 69
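
  # Most keys in this block mirror transformers.TrainingArguments; a minimal sketch
  # of that mapping (output_dir is assumed since it is not set here, and repo-specific
  # keys such as scenario, max_eval_samples, one_hour_job, and random_sample_seed are
  # handled outside TrainingArguments):
  #   from transformers import TrainingArguments
  #   args = TrainingArguments(
  #       output_dir="output",             # assumed, not set in this file
  #       per_device_train_batch_size=2,
  #       gradient_accumulation_steps=4,
  #       per_device_eval_batch_size=2,
  #       learning_rate=5e-5,
  #       num_train_epochs=3,
  #       save_steps=5000,
  #       evaluation_strategy="steps",
  #       eval_steps=5000,
  #       fp16=True,
  #       fp16_opt_level="O1",
  #       lr_scheduler_type="linear",
  #       dataloader_num_workers=16,
  #       label_names=["labels"],
  #       ddp_find_unused_parameters=False,
  #       ignore_data_skip=True,
  #   )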