# Issue opened by anhhuyalex, 7 months ago (status: Open)
# Launch multi-node stage-2 (no-prior) fine-tuning via HuggingFace Accelerate.
# Expects in the environment: NUM_GPUS, COUNT_NODE, MASTER_ADDR, MASTER_PORT, model_name.
# $1 = number of training samples to draw from the 15000-sample pool.
# NOTE(review): the original paste had collapsed this multi-line command onto one
# line, so each "\ " became an escaped-space (a stray " " argument) and
# "--samples_pool=15000\ --hidden_dim=..." was glued into one malformed word.
# Restored as a proper multi-line command with quoted expansions.
accelerate launch \
  --num_processes=$(( NUM_GPUS * COUNT_NODE )) \
  --num_machines="$COUNT_NODE" \
  --main_process_ip="$MASTER_ADDR" \
  --main_process_port="$MASTER_PORT" \
  --mixed_precision=fp16 \
  finetune.py \
  --data_path=/fsx/proj-fmri/shared/mindeyev2_dataset \
  --model_name="${model_name}" \
  --no-use_prior \
  --subj=1 --subj_list=1 --batch_size=28 \
  --num_samples="$1" --samples_pool=15000 \
  --hidden_dim=2048 --n_blocks=4 --max_lr=3e-5 --mixup_pct=.33 --num_epochs=40 --stage2 \
  --ckpt_saving --wandb_log --wandb_group_name=check_variance_of_run_40epochs_pool15000 \
  --resume_from_ckpt=../train_logs/stage1_prior_subj01_h2048_3e5 \
  --filter_samples=./cache/subj01_permuted_samples.txt
# wandb group names used: check_variance_of_run_40epochs_pool15000, check_variance_of_run_40epochs
# check that batch size is always 28
# seed everything, use these 5 seeds, find images that would make the best impact across these 5 seeds
# don't just pick 1 image, pick 100 images
# our claim: removing a certain set of images really impacts the performance of the model more
# than another set
# Single-process fine-tuning run on the local cluster copy of the dataset.
# $1 = number of training epochs.
# Fixes: quoted "$1" (unquoted expansion risks word-splitting / vanishing-argument
# if empty) and split the one-line command for readability.
# NOTE(review): --num_sessions=-1 presumably means "use all sessions" — confirm
# against finetune.py's argument parser.
python finetune.py \
  --data_path=/scratch/gpfs/KNORMAN/mindeyev2 \
  --model_name=stage2_noprior_subj01x \
  --subj=1 --subj_list=1 \
  --num_sessions=-1 \
  --max_lr=3e-5 --mixup_pct=.33 \
  --num_epochs="$1" \
  --use_image_aug \
  --file_prefix=finetune_subj1_perf_vs_numsession_NOV24 \
  --resume_from_ckpt=pretrainNOV16_1700670984.4315398.pkl