dayuyang1999 opened this issue 3 weeks ago
#!/bin/bash
# cluster 1
num=1
gpu_index=$(($num - 1)) # Calculates the 0-indexed GPU number
port_num=$(($num + 29500))
# Setting the specific GPU to be visible to the torchrun command
# Run the PyTorch distributed script with specified parameters
CUDA_VISIBLE_DEVICES=$gpu_index torchrun --nproc_per_node 1 --master_port $port_num /home/jovyan/test_inference/llama3-main/inference_MP_table_description.py \
--ckpt_dir /home/jovyan/model_weights/meta-llama/Meta-Llama-3-8B-Instruct/original \
--tokenizer_path /home/jovyan/model_weights/meta-llama/Meta-Llama-3-8B-Instruct/original/tokenizer.model \
--max_seq_len 2048 \
--max_batch_size 24 \
--temperature 0.1 \
--top_p 0.9 \
--input_file "/home/jovyan/project/description_data/dictionary_part_${num}.json" \
--output_file "/home/jovyan/project/description_generation/wikitables_descriptions_${num}.json"
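
For reference, here is a minimal sketch of how the same script could be generalized to launch one process per data part, each pinned to its own GPU with a unique master port. The part count (NUM_PARTS=4) and the backgrounding/wait pattern are assumptions for illustration, not part of the original setup; all paths and flags are copied from the script above.

#!/bin/bash
# Hypothetical multi-part launcher (assumes one input part per GPU).
NUM_PARTS=4  # assumed number of GPUs / input file parts

for num in $(seq 1 $NUM_PARTS); do
    gpu_index=$(($num - 1))       # 0-indexed GPU for this part
    port_num=$(($num + 29500))    # unique rendezvous port per process

    # Launch each single-GPU torchrun job in the background.
    CUDA_VISIBLE_DEVICES=$gpu_index torchrun --nproc_per_node 1 --master_port $port_num \
        /home/jovyan/test_inference/llama3-main/inference_MP_table_description.py \
        --ckpt_dir /home/jovyan/model_weights/meta-llama/Meta-Llama-3-8B-Instruct/original \
        --tokenizer_path /home/jovyan/model_weights/meta-llama/Meta-Llama-3-8B-Instruct/original/tokenizer.model \
        --max_seq_len 2048 \
        --max_batch_size 24 \
        --temperature 0.1 \
        --top_p 0.9 \
        --input_file "/home/jovyan/project/description_data/dictionary_part_${num}.json" \
        --output_file "/home/jovyan/project/description_generation/wikitables_descriptions_${num}.json" &
done
wait  # block until all background runs finish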