LiuLab-Bioelectronics-Harvard / UnitedNet

GNU General Public License v3.0
39 stars 14 forks source link

Training parameter settings for MUSE dataset #7

Closed xu15825503775 closed 1 year ago

xu15825503775 commented 1 year ago

When I want to train on the MUSE dataset, I would like to know how to set the dataset parameters, because the examples you provide are all tri-modal, while the MUSE dataset is bimodal.

xintangg commented 1 year ago

Try this one:

# Training configuration for the bimodal MUSE dataset (two modalities, so two
# encoder / decoder / discriminator specs instead of the tri-modal examples).
MUSE_config = {
    # --- batch sizes and epoch counts per training phase ---
    "train_batch_size": 64,
    "finetune_batch_size": 64,
    "transfer_batch_size": 64,
    "train_epochs": 10,
    "finetune_epochs": 10,
    "transfer_epochs": 10,
    # --- task selection; no transfer task is configured for MUSE ---
    "train_task": "unsupervised_group_identification",
    "finetune_task": "unsupervised_group_identification",
    "transfer_task": None,
    # --- loss weighting left to the framework defaults (None) ---
    "train_loss_weight": None,
    "finetune_loss_weight": None,
    "transfer_loss_weight": None,
    # --- optimizer / model hyperparameters ---
    "lr": 0.005,
    "checkpoint": 1,
    "n_head": 10,
    "noise_level": [0.03] * 2,  # one noise level per modality
    "fuser_type": "WeightedFeatureMean",
    # --- one encoder per modality: 500 -> 128 -> 128 -> 100 ---
    "encoders": [
        {
            "input": 500,
            "hiddens": [128, 128],
            "output": 100,
            "use_biases": [True, True, True],
            "dropouts": [0, 0, 0],
            "activations": ["relu", "relu", "tanh"],
            "use_batch_norms": [True, True, False],
            "use_layer_norms": [False, False, False],
            "is_binary_input": False,
        },
        {
            "input": 500,
            "hiddens": [128, 128],
            "output": 100,
            "use_biases": [True, True, True],
            "dropouts": [0, 0, 0],
            "activations": ["relu", "relu", "tanh"],
            "use_batch_norms": [True, True, False],
            "use_layer_norms": [False, False, False],
            "is_binary_input": False,
        },
    ],
    # --- shared latent projector: identity-sized 100 -> 100 linear layer ---
    "latent_projector": {
        "input": 100,
        "hiddens": [],
        "output": 100,
        "use_biases": [True],
        "dropouts": [0],
        "activations": [None],
        "use_batch_norms": [True],
        "use_layer_norms": [False],
        "is_binary_input": False,
    },
    # --- one decoder per modality: 100 -> 128 -> 128 -> 500 ---
    "decoders": [
        {
            "input": 100,
            "hiddens": [128, 128],
            "output": 500,
            "use_biases": [True, True, True],
            "dropouts": [0, 0, 0],
            "activations": ["relu", "tanh", None],
            "use_batch_norms": [True, True, False],
            "use_layer_norms": [False, False, False],
        },
        {
            "input": 100,
            "hiddens": [128, 128],
            "output": 500,
            "use_biases": [True, True, True],
            "dropouts": [0, 0, 0],
            "activations": ["relu", "tanh", None],
            "use_batch_norms": [True, True, False],
            "use_layer_norms": [False, False, False],
        },
    ],
    # --- one discriminator per modality: 500 -> 128 -> 128 -> 1 (sigmoid) ---
    "discriminators": [
        {
            "input": 500,
            "hiddens": [128, 128],
            "output": 1,
            "use_biases": [True, True, True],
            "dropouts": [0, 0, 0],
            "activations": ["relu", "relu", "sigmoid"],
            "use_batch_norms": [False, False, False],
            "use_layer_norms": [False, False, False],
        },
        {
            "input": 500,
            "hiddens": [128, 128],
            "output": 1,
            "use_biases": [True, True, True],
            "dropouts": [0, 0, 0],
            "activations": ["relu", "relu", "sigmoid"],
            "use_batch_norms": [False, False, False],
            "use_layer_norms": [False, False, False],
        },
    ],
    # --- latent-space projector head: 100 -> 100 linear, no normalization ---
    "projectors": {
        "input": 100,
        "hiddens": [],
        "output": 100,
        "use_biases": [True],
        "dropouts": [0],
        "activations": [None],
        "use_batch_norms": [False],
        "use_layer_norms": [False],
    },
    # --- clustering head: 100 -> 10 cluster logits ---
    "clusters": {
        "input": 100,
        "hiddens": [],
        "output": 10,
        "use_biases": [True],
        "dropouts": [0],
        "activations": [None],
        "use_batch_norms": [False],
        "use_layer_norms": [False],
    },
}