#data
dataset_name: cadillac
test_name: "testcase_train"
gpu_id: 1
epochs: 1
# data_dir: /raid/ansysai/udbhav/alpha_Xdata/data_prep_transformer/cadillac_v2/1_VTK_surface/
data_dir: datasets/examples/testcase/cadillac_v2/1_VTK_surface
json_file: ${data_dir}/params.json
splits_file: ${data_dir}/
data_folder: ${dataset_name}
input_normalization: "shift_axis" # options: "min_max", "std_norm", "none"
normalization: "std_norm"
norm_vars: "pressure"
physical_scale_for_test: True
diff_input_velocity: True # If True, inlet_x_velocity is added as an input feature
# num_points: 40000
num_points: 30000
num_workers: 1

#model
indim: 4
outdim: 1
model: ansysLPFMs
hidden_dim: 256
n_heads: 8
n_decoder: 8
mlp_ratio: 2

#training
val_iter: 1
lr: 0.001
batch_size: 1
optimizer:
  type: AdamW
scheduler: OneCycleLR
# scheduler: LinearWarmupCosineAnnealingLR
loss_type: huber # options: mse, mae, huber
num_processes: 1
max_grad_norm: 1.0
mixed_precision: True # fp16 is currently selected by default via torch.autocast(); fp16 gave the best results for Transformer-based models
eval: False
save_latent: False
chunked_eval: True # When True, evaluates in the largest chunks of size num_points that fit in a data sample, avoiding small trailing chunks
train_ckpt_load: False # Loads the best model if train_ckpt_load is False
pos_embed_sincos: False
project_name: ${dataset_name}
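
# --- Usage note (kept as comments so this file stays valid YAML) ---
# A minimal loading sketch, assuming the ${...} references above are
# OmegaConf-style interpolations; the filename "config.yaml" is hypothetical:
#
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.load("config.yaml")
#   cfg.json_file      # ${data_dir} resolves to
#                      # datasets/examples/testcase/cadillac_v2/1_VTK_surface/params.json
#   cfg.project_name   # ${dataset_name} resolves to cadillac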