apt-get install git-lfs    # Debian/Ubuntu; or: yum install git-lfs (CentOS/RHEL)
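After installing the package, Git LFS needs to be initialized once so that the clone below fetches the actual weight files rather than pointer stubs:
git lfs install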
cd /home/deepseek-ai/
git clone https://www.modelscope.cn/deepseek-ai/DeepSeek-R1-Distill-Llama-70B.git
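The 70B checkpoint is roughly 140 GB (70B parameters at 2 bytes each), so confirm free disk space before cloning. If the clone finishes but the weight files are only a few hundred bytes each, the LFS objects were not fetched; a pull inside the repository repairs this:
cd DeepSeek-R1-Distill-Llama-70B
git lfs pull   # fetch any missing LFS weight files
du -sh .       # sanity-check the on-disk size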
Add the registry host mapping to /etc/hosts:
120.220.95.189 zibo.harbor.iluvatar.com.cn
"exec-opts": ["native.cgroupdriver=systemd"],
"insecure-registries": ["zibo.harbor.iluvatar.com.cn:30000"]
}
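Docker must be restarted for daemon.json changes to take effect, and if the Harbor registry enforces authentication a login is needed before pulling (credentials are site-specific):
systemctl daemon-reload
systemctl restart docker
docker info | grep -iE 'cgroup|insecure'          # verify the new settings are active
docker login zibo.harbor.iluvatar.com.cn:30000    # only if the registry requires auth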
docker pull zibo.harbor.iluvatar.com.cn:30000/saas/bi100-3.2.1-x86-ubuntu20.04-py3.10-poc-llm-infer:v1.2.2
docker run -it \
  -v /usr/src:/usr/src -v /lib/modules:/lib/modules -v /dev:/dev -v /home:/home \
  -p 1000-1999:1000-1999 \
  --name=test --pid=host --ipc=host --privileged --cap-add=ALL \
  zibo.harbor.iluvatar.com.cn:30000/saas/bi100-3.2.1-x86-ubuntu20.04-py3.10-poc-llm-infer:v1.2.2 /bin/bash
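Before launching inference it is worth confirming that all eight accelerators are visible inside the container. The check below is a sketch: ixsmi is the Iluvatar analogue of nvidia-smi shipped with the Corex stack (assumed present in this image), and the torch one-liner works on any CUDA-compatible PyTorch build:
ixsmi
python3 -c "import torch; print(torch.cuda.device_count())"   # expect 8 for -tp 8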
cd /root/apps/llm-modelzoo/inference/LLama/vllm
python3 offline_inference.py --model /home/deepseek-ai/DeepSeek-R1-Distill-Llama-70B/ --max-tokens 256 -tp 8 --trust-remote-code --temperature 0.55 --gpu-memory-utilization 0.97 --max-model-len 8192
cd ~/apps/llm-modelzoo/benchmark/vllm
Server side:
python3 -m vllm.entrypoints.openai.api_server --model /home/deepseek-ai/DeepSeek-R1-Distill-Llama-70B/ --gpu-memory-utilization 0.9 --max-num-batched-tokens 8192 --max-model-len 8192 --max-num-seqs 256 -tp 8 --host 0.0.0.0 --port 1234
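Once the server logs show it is listening on port 1234, the OpenAI-compatible /v1/models route offers a quick sanity check that the model is registered under the expected name:
curl http://127.0.0.1:1234/v1/models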
Client side:
python3 benchmark_server_openapi.py --tokenizer /home/deepseek-ai/DeepSeek-R1-Distill-Llama-70B/ --host 127.0.0.1 --port 1234 --num-prompts 32 --input-tokens 256 --output-tokens 128 --time-interval 0.5 --segments 50,90,99 --save-csv
Test with curl (the model field must match the path passed to --model on the server, and stream is a JSON boolean):
curl -X POST http://127.0.0.1:1234/v1/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_API_KEY" \
-d '{"model": "/home/deepseek-ai/DeepSeek-R1-Distill-Llama-70B/", "prompt": "常德美食有哪些", "temperature": 0.0, "max_tokens": 2000, "stream": true}'