Mirror of https://github.com/meta-llama/llama-stack.git

Commit: 028530546f ("Convert TGI")
Parent: 9bb07ce298
14 changed files with 485 additions and 160 deletions
@@ -1,13 +1,3 @@
-# NOTES:
-#
-# This Docker Compose (and the associated run.yaml) assumes you will be
-# running in the default "bridged" network mode.
-#
-# If you need "host" network mode, please uncomment
-# - network_mode: "host"
-#
-# Similarly change "host.docker.internal" to "localhost" in the run.yaml file
-#
 services:
   vllm-inference:
     image: vllm/vllm-openai:latest
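The comment block removed above described how to switch the vLLM service from the default bridged networking to host networking. As a minimal sketch, assuming the rest of the service definition stays as in this compose file (the command line and model name below are illustrative assumptions, not part of this diff), the host-mode variant would look roughly like:

services:
  vllm-inference:
    image: vllm/vllm-openai:latest
    # Share the host's network namespace instead of the default bridge;
    # no explicit port mapping is needed in this mode.
    network_mode: "host"
    # Illustrative serving arguments; the actual model and flags are not shown in this diff.
    command: --model meta-llama/Llama-3.1-8B-Instruct --port 8000

With host networking, the companion run.yaml would point its inference URL at localhost rather than host.docker.internal, as the removed comment notes.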
@@ -1,14 +1,14 @@
 version: '2'
-built_at: 2024-11-17 14:07:24.568750
+built_at: 2024-11-17 14:48:55.487270
 image_name: remote-vllm
 docker_image: llamastack/distribution-remote-vllm:test-0.0.52rc3
 conda_env: null
 apis:
 - safety
 - agents
 - telemetry
 - safety
 - inference
 - memory
 - inference
 providers:
   inference:
   - provider_id: vllm-inference
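The removed compose comment also said to change "host.docker.internal" to "localhost" in run.yaml when using host networking. A hedged sketch of the inference provider entry this refers to, assuming the remote vLLM adapter used by this distribution (the provider_type, url, and port are assumptions; only the provider_id line is visible in the hunk above):

providers:
  inference:
  - provider_id: vllm-inference
    provider_type: remote::vllm
    config:
      # "host.docker.internal" reaches the host from a bridged container;
      # with network_mode: "host" this would become "localhost".
      url: http://host.docker.internal:8000/v1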
@@ -1,14 +1,14 @@
 version: '2'
-built_at: 2024-11-17 14:07:24.563541
+built_at: 2024-11-17 14:48:55.476058
 image_name: remote-vllm
 docker_image: llamastack/distribution-remote-vllm:test-0.0.52rc3
 conda_env: null
 apis:
 - safety
 - agents
 - telemetry
 - safety
 - inference
 - memory
 - inference
 providers:
   inference:
   - provider_id: vllm-inference