# 文档编号: 00000285
apt-get install git-lfs（Debian/Ubuntu） 或者 yum install git-lfs（RHEL/CentOS）
# Download Qwen3 model weights from ModelScope.
# ModelScope model repos store weights via Git LFS: without `git lfs install`
# (which registers the LFS smudge/clean filters), `git clone` only fetches
# tiny pointer files instead of the actual safetensors weights.
git lfs install
mkdir -p /home/Qwen/
# Guard the cd so the clones never land in the wrong directory.
cd /home/Qwen/ || exit 1
git clone https://www.modelscope.cn/Qwen/Qwen3-4B.git
git clone https://www.modelscope.cn/Qwen/Qwen3-8B.git
git clone https://www.modelscope.cn/Qwen/Qwen3-14B.git
git clone https://www.modelscope.cn/Qwen/Qwen3-32B.git
# 将以下映射追加到 /etc/hosts:
120.220.95.189 zibo.harbor.iluvatar.com.cn
将以下内容写入 /etc/docker/daemon.json,然后执行 systemctl restart docker 使其生效:
{
"exec-opts": ["native.cgroupdriver=systemd"],
"insecure-registries": ["zibo.harbor.iluvatar.com.cn:30000"]
}
# Pull the BI150 vLLM inference image from the private Harbor registry.
# Requires the hosts mapping and the insecure-registries daemon.json entry
# configured earlier in this document.
docker pull zibo.harbor.iluvatar.com.cn:30000/saas/mr-bi150-4.1.3-aarch64-ubuntu20.04-py3.10-poc-llm-infer:v1.2.2
在 Windows 上通过 FileZilla 工具下载:文件 -> 站点管理器 -> 新站点
协议:选择SFTP-SSH File Transfer Protocol
主机: iftp.iluvatar.com.cn 端口:29880
用户:iluvatar_mr
密码:联系天数工程师
在远程站点内输入/client_tmp/support/,把mr-bi150-4.1.3-aarch64-ubuntu20.04-py3.10-poc-llm-infer-v1.2.2.tar文件拷贝到windows本地目录上,然后放到服务器/home目录下
# Load the image tarball downloaded via SFTP (offline alternative to docker pull).
# NOTE: the original line began with the prose word "执行" ("run"), which would
# break if pasted into a shell — removed here.
docker load -i /home/mr-bi150-4.1.3-aarch64-ubuntu20.04-py3.10-poc-llm-infer-v1.2.2.tar
# Start the inference container: privileged, host network/PID/IPC namespaces,
# with kernel sources/modules, device nodes, and /home (model weights) mounted.
# Fix: the original command passed --pid=host twice; the duplicate is dropped.
docker run -it \
  -v /usr/src:/usr/src \
  -v /lib/modules:/lib/modules \
  -v /dev:/dev \
  -v /home:/home \
  --network=host \
  --pid=host \
  --ipc=host \
  --privileged \
  --cap-add=ALL \
  --name=Qwen3 \
  zibo.harbor.iluvatar.com.cn:30000/saas/mr-bi150-4.1.3-aarch64-ubuntu20.04-py3.10-poc-llm-infer:v1.2.2 \
  /bin/bash
# Offline smoke tests inside the container, using the vendor-provided vLLM
# example script. Greedy decoding (temperature 0.0), 256 new tokens.
# NOTE(review): --max-model-len 3096 is an unusual value (3072 or 4096 are
# typical) — confirm it is intentional and not a typo.
cd /root/apps/llm-modelzoo/inference/Qwen/vllm
# 4B and 8B fit on a single GPU.
python3 offline_inference.py --model /home/Qwen/Qwen3-4B/ --max-tokens 256 --temperature 0.0 --max-model-len 3096
python3 offline_inference.py --model /home/Qwen/Qwen3-8B/ --max-tokens 256 --temperature 0.0 --max-model-len 3096
# -tp 2 / -tp 4: tensor parallelism across 2 / 4 GPUs for the larger models.
python3 offline_inference.py --model /home/Qwen/Qwen3-14B/ --max-tokens 256 -tp 2 --temperature 0.0 --max-model-len 3096
python3 offline_inference.py --model /home/Qwen/Qwen3-32B/ --max-tokens 256 -tp 4 --temperature 0.0 --max-model-len 3096
# Server side: launch vLLM's OpenAI-compatible API server, one model at a time.
# All four commands bind the same host:port (0.0.0.0:1234), so stop the
# previous server before starting the next — they cannot run concurrently.
python3 -m vllm.entrypoints.openai.api_server --model /home/Qwen/Qwen3-4B/ --gpu-memory-utilization 0.9 --max-num-batched-tokens 5120 --max-model-len 2048 --max-num-seqs 256 --host 0.0.0.0 --port 1234 --trust-remote-code
python3 -m vllm.entrypoints.openai.api_server --model /home/Qwen/Qwen3-8B/ --gpu-memory-utilization 0.9 --max-num-batched-tokens 5120 --max-model-len 2048 --max-num-seqs 256 --host 0.0.0.0 --port 1234 --trust-remote-code
# -tp 2 / -tp 4: tensor parallelism across 2 / 4 GPUs for 14B / 32B.
python3 -m vllm.entrypoints.openai.api_server --model /home/Qwen/Qwen3-14B/ --gpu-memory-utilization 0.9 --max-num-batched-tokens 5120 --max-model-len 2048 --max-num-seqs 256 -tp 2 --host 0.0.0.0 --port 1234 --trust-remote-code
# 32B additionally uses the Ray distributed executor backend.
python3 -m vllm.entrypoints.openai.api_server --model /home/Qwen/Qwen3-32B/ --gpu-memory-utilization 0.9 --max-num-batched-tokens 5120 --max-model-len 2048 --max-num-seqs 256 -tp 4 --host 0.0.0.0 --port 1234 --trust-remote-code --distributed-executor-backend ray
# Client side: query the OpenAI-compatible /v1/completions endpoint.
# The "model" field must match the --model path the running server was
# started with. NOTE(review): the server commands above set no API key, so
# the Authorization header is presumably ignored — confirm before relying
# on it for access control.
curl -X POST http://0.0.0.0:1234/v1/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_API_KEY" \
-d '{"model": "/home/Qwen/Qwen3-4B/","prompt": "介绍一下湖南省常德市","temperature": 0.0,"max_tokens": 512}'
# Qwen3-8B
curl -X POST http://0.0.0.0:1234/v1/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_API_KEY" \
-d '{"model": "/home/Qwen/Qwen3-8B/","prompt": "介绍一下湖南省常德市","temperature": 0.0,"max_tokens": 512}'
# Qwen3-14B
curl -X POST http://0.0.0.0:1234/v1/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_API_KEY" \
-d '{"model": "/home/Qwen/Qwen3-14B/","prompt": "介绍一下湖南省常德市","temperature": 0.0,"max_tokens": 512}'
# Qwen3-32B
curl -X POST http://0.0.0.0:1234/v1/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer YOUR_API_KEY" \
-d '{"model": "/home/Qwen/Qwen3-32B/","prompt": "介绍一下湖南省常德市","temperature": 0.0,"max_tokens": 512}'