services:
  gpt-oss:
    image: nvcr.io/nvidia/vllm:26.03.post1-py3
    container_name: vllm-gpt-oss
    restart: always
    ipc: host
    shm_size: '16gb'
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
    ports:
      - "8000:8000"
    volumes:
      - /root/.cache/huggingface:/root/.cache/huggingface
    command: >
      vllm serve openai/gpt-oss-20b
      --gpu-memory-utilization 0.35
      --trust-remote-code

  qwen-36:
    image: nvcr.io/nvidia/vllm:26.03.post1-py3
    container_name: vllm-qwen
    restart: always
    ipc: host
    shm_size: '16gb'
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    depends_on:
      - gpt-oss
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
    ports:
      - "8001:8000"
    volumes:
      - /root/.cache/huggingface:/root/.cache/huggingface
    entrypoint: /bin/sh -c "sleep 300 && vllm serve /root/.cache/huggingface/hub/models--Qwen--Qwen3.6-35B-A3B-FP8/snapshots/61a5771f218894aaacf97551e24a25b866750fc2 --quantization fp8 --kv-cache-dtype fp8 --max-model-len 32768 --gpu-memory-utilization 0.40 --trust-remote-code --served-model-name qwen-3.6-blackwell"
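Once both containers are up (the second one sleeps 300 seconds before loading its model), each vLLM instance exposes an OpenAI-compatible API on its mapped port. Below is a minimal verification sketch, assuming the ports above, a local host, and no API key configured; it asks each server which model ids it reports rather than guessing the names.

import json
import urllib.request

# Ports as mapped in the compose file above; change the host if the
# containers run on a remote machine. (Assumed: localhost, no auth.)
ENDPOINTS = [
    "http://localhost:8000",  # gpt-oss service
    "http://localhost:8001",  # qwen-36 service
]

def list_models(base_url: str) -> list[str]:
    """Query vLLM's OpenAI-compatible /v1/models endpoint."""
    with urllib.request.urlopen(f"{base_url}/v1/models", timeout=10) as resp:
        data = json.load(resp)
    return [m["id"] for m in data.get("data", [])]

def chat(base_url: str, model: str, prompt: str) -> str:
    """Send one chat completion request and return the reply text."""
    payload = json.dumps({
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 64,
    }).encode()
    req = urllib.request.Request(
        f"{base_url}/v1/chat/completions",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req, timeout=120) as resp:
        data = json.load(resp)
    return data["choices"][0]["message"]["content"]

if __name__ == "__main__":
    for url in ENDPOINTS:
        models = list_models(url)
        print(f"{url} -> {models}")
        if models:
            # Use the model id the server reports (e.g. openai/gpt-oss-20b
            # or qwen-3.6-blackwell, per --served-model-name above).
            print(chat(url, models[0], "Reply with the word 'ready'."))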