vllm.slurm
#!/bin/bash
#SBATCH --cpus-per-task=16
#SBATCH --mem=64G
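# GPU count, partition, and time limit are assumed to be supplied at
# submission time (e.g. via sbatch flags) rather than hard-coded here.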
# Load CUDA; change to the CUDA version in your environment if different
source /opt/lmod/lmod/init/profile
module load cuda-12.3
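# Log the GPUs visible to this job for debugging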
nvidia-smi
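# find_port.sh provides find_available_port, which scans the given range
# for a free port on this node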
source ${SRC_DIR}/find_port.sh
# Write the server URL to the job log (stdout)
hostname=${SLURMD_NODENAME}
vllm_port_number=$(find_available_port $hostname 8080 65535)
echo "Server address: http://${hostname}:${vllm_port_number}/v1"
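
# Once the server reports it is ready, the endpoint can be sanity-checked
# from another node, e.g.:
#   curl http://${hostname}:${vllm_port_number}/v1/models

# Translate the boolean ENFORCE_EAGER env var into the corresponding vLLM CLI flag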
if [ "$ENFORCE_EAGER" = "True" ]; then
    export ENFORCE_EAGER="--enforce-eager"
else
    export ENFORCE_EAGER=""
fi
# Run inside a Singularity container, or activate the vLLM venv
if [ "$VENV_BASE" = "singularity" ]; then
    export SINGULARITY_IMAGE=/projects/aieng/public/vector-inference_0.6.4.post1.sif
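    # Point vLLM at the NCCL shared library bundled in the container image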
    export VLLM_NCCL_SO_PATH=/vec-inf/nccl/libnccl.so.2.18.1
    module load singularity-ce/3.8.2
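    # Stop any leftover Ray processes so they do not interfere with this job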
    singularity exec $SINGULARITY_IMAGE ray stop
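    # Bind-mount the model weights directory so it is visible inside the container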
    singularity exec --nv --bind ${MODEL_WEIGHTS_PARENT_DIR}:${MODEL_WEIGHTS_PARENT_DIR} $SINGULARITY_IMAGE \
        python3.10 -m vllm.entrypoints.openai.api_server \
        --model ${VLLM_MODEL_WEIGHTS} \
        --served-model-name ${JOB_NAME} \
        --host "0.0.0.0" \
        --port ${vllm_port_number} \
        --tensor-parallel-size ${NUM_GPUS} \
        --dtype ${VLLM_DATA_TYPE} \
        --max-logprobs ${VLLM_MAX_LOGPROBS} \
        --trust-remote-code \
        --max-model-len ${VLLM_MAX_MODEL_LEN} \
        --max-num-seqs ${VLLM_MAX_NUM_SEQS} \
        ${ENFORCE_EAGER}
else
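    # Assumes ${VENV_BASE} points at a Python virtual environment with vLLM installed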
    source ${VENV_BASE}/bin/activate
    python3 -m vllm.entrypoints.openai.api_server \
        --model ${VLLM_MODEL_WEIGHTS} \
        --served-model-name ${JOB_NAME} \
        --host "0.0.0.0" \
        --port ${vllm_port_number} \
        --tensor-parallel-size ${NUM_GPUS} \
        --dtype ${VLLM_DATA_TYPE} \
        --max-logprobs ${VLLM_MAX_LOGPROBS} \
        --trust-remote-code \
        --max-model-len ${VLLM_MAX_MODEL_LEN} \
        --max-num-seqs ${VLLM_MAX_NUM_SEQS} \
        ${ENFORCE_EAGER}
fi
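
# Example submission (a sketch; the values below are illustrative
# assumptions, not defaults shipped with this script):
#   export SRC_DIR=/path/to/this/repo
#   export VENV_BASE=singularity
#   export JOB_NAME=my-model
#   export NUM_GPUS=1
#   export MODEL_WEIGHTS_PARENT_DIR=/path/to/model-weights
#   export VLLM_MODEL_WEIGHTS=${MODEL_WEIGHTS_PARENT_DIR}/my-model
#   export VLLM_DATA_TYPE=auto
#   export VLLM_MAX_LOGPROBS=5
#   export VLLM_MAX_MODEL_LEN=4096
#   export VLLM_MAX_NUM_SEQS=256
#   export ENFORCE_EAGER=False
#   sbatch --gres=gpu:${NUM_GPUS} vllm.slurm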