#!/bin/bash
#SBATCH -p Gveval                  # partition (queue) name
#SBATCH --quotatype=spot           # spot (preemptible) quota type
#SBATCH --nodes=1                  # number of nodes
#SBATCH --ntasks=8                 # total tasks (one per GPU)
#SBATCH --gres=gpu:8               # GPUs per node
#SBATCH --cpus-per-task=16         # CPU cores per task
#SBATCH --job-name=qwen_train      # job name (arbitrary)
#SBATCH --requeue                  # requeue the job if it is preempted; the program itself must handle resuming
#SBATCH --open-mode append         # append to the log file instead of truncating it

export http_proxy=http://hanjiaming:DXtIkuMPmgy3M3UnCrRhGIxSMMaZn8iit2Br6UdG32fscs2l1bKKQ690WYTC@10.1.20.50:23128/
export https_proxy=http://hanjiaming:DXtIkuMPmgy3M3UnCrRhGIxSMMaZn8iit2Br6UdG32fscs2l1bKKQ690WYTC@10.1.20.50:23128/
export HTTP_PROXY=http://hanjiaming:DXtIkuMPmgy3M3UnCrRhGIxSMMaZn8iit2Br6UdG32fscs2l1bKKQ690WYTC@10.1.20.50:23128/
export HTTPS_PROXY=http://hanjiaming:DXtIkuMPmgy3M3UnCrRhGIxSMMaZn8iit2Br6UdG32fscs2l1bKKQ690WYTC@10.1.20.50:23128/

export LMMS_EVAL_LAUNCHER="accelerate"

benchmark=scan2cap                 # choices: [scan2cap, scanrefer, scannet_4frames, scannet_6frames]
output_path=logs/$(TZ="Asia/Shanghai" date "+%Y%m%d")
model_path=ckpts/Qwen2.5-Omni-3B-sftv2-full
model_args_str="pretrained=$model_path,attn_implementation=flash_attention_2,max_num_frames=32"

export PYTHONPATH=$(pwd)/src:$PYTHONPATH
export TOKENIZERS_PARALLELISM=true

apptainer exec -f -w --nv --bind /mnt:/mnt /mnt/petrelfs/hanjiaming/llama_factory/ \
    accelerate launch --num_processes=8 --main_process_port 29503 -m lmms_eval \
        --model qwen2_5_omni \
        --model_args "$model_args_str" \
        --tasks ${benchmark} \
        --batch_size 1 \
        --log_samples_suffix original \
        --log_samples \
        --output_path $output_path
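
# Usage sketch (the filename run_lmms_eval.slurm is an assumption, not part of
# the original script):
#
#   sbatch run_lmms_eval.slurm
#
# Because --quotatype=spot jobs can be preempted and --requeue puts them back in
# the queue, the whole script reruns from the top after a requeue, so the
# evaluation needs to be restartable. One hypothetical guard (the result-file
# pattern is an assumption, not taken from lmms_eval) would be to skip the run
# near the top of the script when results already exist:
#
#   if compgen -G "$output_path"/*"$benchmark"* > /dev/null; then
#       echo "Results for $benchmark already found under $output_path; skipping."
#       exit 0
#   fi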