Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-01 16:24:44 +00:00
kind of working
This commit is contained in:
parent b63982ef00 · commit 3c24be8273

6 changed files with 42 additions and 19 deletions
@@ -48,15 +48,9 @@ spec:
       readinessProbe:
         httpGet:
           path: /v1/models
-          port: http-openai
-        initialDelaySeconds: 360
-        periodSeconds: 360
-      livenessProbe:
-        httpGet:
-          path: /v1/health
-          port: http-openai
-        initialDelaySeconds: 600
-        periodSeconds: 360
+          port: 8000
+        initialDelaySeconds: 100
+        periodSeconds: 100
 
 ---
 apiVersion: v1
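Note on the hunk above: the livenessProbe is dropped and the readinessProbe now polls GET /v1/models on port 8000 with much shorter delays. A minimal Python sketch of the check the kubelet performs — the service name and helper are illustrative, not part of the commit:

import urllib.request

def is_ready(base_url: str = "http://vllm-server.default.svc.cluster.local:8000") -> bool:
    # Mirrors the readinessProbe: GET /v1/models; any 2xx response counts as ready.
    try:
        with urllib.request.urlopen(f"{base_url}/v1/models", timeout=5) as resp:
            return 200 <= resp.status < 300
    except OSError:  # HTTPError/URLError both subclass OSError
        return False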
@@ -15,14 +15,14 @@ data:
       - provider_id: vllm-inference
         provider_type: remote::vllm
         config:
-          url: ${env.VLLM_URL:=http://localhost:8000/v1}
+          url: ${env.VLLM_URL:=http://localhost:8001/v1}
           max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
           api_token: ${env.VLLM_API_TOKEN:=fake}
           tls_verify: ${env.VLLM_TLS_VERIFY:=true}
       - provider_id: nvidia
         provider_type: remote::nvidia
         config:
-          url: ${env.NVIDIA_BASE_URL:=http://localhost:8001/v1}
+          url: ${env.NVIDIA_BASE_URL:=http://localhost:8000/v1}
           api_key: ${env.NVIDIA_API_KEY:=}
           append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
       - provider_id: sentence-transformers
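The `${env.VAR:=default}` form used throughout these configs falls back to the default when the variable is unset or empty; the hunk above swaps the default ports so vllm sits on 8001 and the NIM on 8000. A rough sketch of that substitution — the helper is hypothetical, llama-stack's actual resolver may differ:

import os
import re

_PATTERN = re.compile(r"\$\{env\.([A-Za-z_][A-Za-z0-9_]*):=([^}]*)\}")

def resolve_env_defaults(text: str) -> str:
    # Replace ${env.VAR:=default} with os.environ[VAR], falling back to
    # the default when VAR is unset or empty (bash-style := semantics).
    return _PATTERN.sub(lambda m: os.environ.get(m.group(1)) or m.group(2), text)

# With VLLM_URL unset this prints: url: http://localhost:8001/v1
print(resolve_env_defaults("url: ${env.VLLM_URL:=http://localhost:8001/v1}"))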
@@ -28,10 +28,10 @@ spec:
       initContainers:
         - name: wait-for-vllm-server
           image: busybox:1.28
-          command: ['sh', '-c', 'until nc -z vllm-server.default.svc.cluster.local 8001; do echo waiting for vllm-server on port 8000; sleep 2; done;']
+          command: ['sh', '-c', 'until nc -z vllm-server.default.svc.cluster.local 8001; do echo waiting for vllm-server on port 8001; sleep 2; done;']
         - name: wait-for-llm-nim-code
           image: busybox:1.28
-          command: ['sh', '-c', 'until nc -z llm-nim-code.default.svc.cluster.local 8000; do echo waiting for llm-nim-code on port 8001; sleep 2; done;']
+          command: ['sh', '-c', 'until nc -z llm-nim-code.default.svc.cluster.local 8000; do echo waiting for llm-nim-code on port 8000; sleep 2; done;']
       containers:
         - name: llama-stack
           image: llamastack/distribution-starter:0.2.15
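The two fixes above only make the init-container log messages match the port each `nc -z` actually probes. For reference, the busybox wait loop is equivalent to this Python sketch (function name and defaults are illustrative):

import socket
import time

def wait_for_port(host: str, port: int, interval: float = 2.0) -> None:
    # Equivalent of: until nc -z HOST PORT; do echo waiting ...; sleep 2; done
    while True:
        try:
            with socket.create_connection((host, port), timeout=2.0):
                return
        except OSError:
            print(f"waiting for {host} on port {port}")
            time.sleep(interval)

# wait_for_port("vllm-server.default.svc.cluster.local", 8001)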
@@ -54,7 +54,7 @@ spec:
         - name: VLLM_MAX_TOKENS
           value: "3072"
         - name: NVIDIA_BASE_URL
-          value: http://llm-nim-code.default.svc.cluster.local:8000/v1
+          value: http://llm-nim-code.default.svc.cluster.local:8000
         - name: POSTGRES_HOST
           value: postgres-server.default.svc.cluster.local
         - name: POSTGRES_PORT
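Dropping the /v1 suffix here is presumably because the nvidia provider runs with append_api_version enabled (see the config hunks above), so it adds the version segment itself; keeping it in NVIDIA_BASE_URL would yield a .../v1/v1 path. A sketch of that joining behavior with a hypothetical helper:

def build_base_url(base: str, append_api_version: bool = True) -> str:
    # With append_api_version enabled, the provider appends /v1 itself,
    # so the configured base URL should not already carry the suffix.
    base = base.rstrip("/")
    return f"{base}/v1" if append_api_version else base

print(build_base_url("http://llm-nim-code.default.svc.cluster.local:8000"))
# -> http://llm-nim-code.default.svc.cluster.local:8000/v1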
@@ -67,7 +67,22 @@ spec:
           value: "${CODE_MODEL}"
         - name: TAVILY_SEARCH_API_KEY
           value: "${TAVILY_SEARCH_API_KEY}"
-        command: ["python", "-m", "llama_stack.distribution.server.server", "--config", "/etc/config/stack_run_config.yaml", "--port", "8321"]
+        command: ["/bin/sh"]
+        args:
+          - -c
+          - |
+            # Install pip and git
+            /usr/local/bin/python -m pip install --upgrade pip
+            apt-get update && apt-get install -y git
+            # Clone the repository
+            git clone https://github.com/meta-llama/llama-stack.git /app
+            git checkout k8s_demo
+
+            cd /app/llama_stack/
+            # Install llama-stack
+            pip install -e .
+            # Run the llama-stack server
+            python -m llama_stack.distribution.server.server --config /etc/config/stack_run_config.yaml --port 8321
         ports:
           - containerPort: 8321
         volumeMounts:
@@ -12,14 +12,14 @@ providers:
   - provider_id: vllm-inference
     provider_type: remote::vllm
     config:
-      url: ${env.VLLM_URL:=http://localhost:8000/v1}
+      url: ${env.VLLM_URL:=http://localhost:8001/v1}
       max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
       api_token: ${env.VLLM_API_TOKEN:=fake}
       tls_verify: ${env.VLLM_TLS_VERIFY:=true}
   - provider_id: nvidia
     provider_type: remote::nvidia
     config:
-      url: ${env.NVIDIA_BASE_URL:=http://localhost:8001/v1}
+      url: ${env.NVIDIA_BASE_URL:=http://localhost:8000/v1}
      api_key: ${env.NVIDIA_API_KEY:=}
      append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
  - provider_id: sentence-transformers
@@ -8,6 +8,7 @@ import os
 import streamlit as st
 
 from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack_client import LlamaStackClient
 
 
 def providers():
@@ -37,7 +38,18 @@ def providers():
         st.session_state["tavily_search_api_key"] = tavily_search_api_key
 
         # Update the client with the new API key
-        llama_stack_api.update_provider_data("tavily_search_api_key", tavily_search_api_key)
+        # Check if update_provider_data method exists, otherwise update manually
+        if hasattr(llama_stack_api, "update_provider_data"):
+            llama_stack_api.update_provider_data("tavily_search_api_key", tavily_search_api_key)
+        else:
+            # Fallback implementation if method doesn't exist
+            llama_stack_api.provider_data = llama_stack_api.provider_data or {}
+            llama_stack_api.provider_data["tavily_search_api_key"] = tavily_search_api_key
+            # Reinitialize the client with updated provider data
+            llama_stack_api.client = LlamaStackClient(
+                base_url=os.environ.get("LLAMA_STACK_ENDPOINT", "http://localhost:8321"),
+                provider_data=llama_stack_api.provider_data,
+            )
 
         st.success("API keys saved successfully!")
 
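The fallback branch above rebuilds the client because provider data is fixed at construction time; it rides along with each request so remote providers (here, Tavily search) can read the key server-side. A minimal usage sketch — the endpoint and key value are placeholders:

import os

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(
    base_url=os.environ.get("LLAMA_STACK_ENDPOINT", "http://localhost:8321"),
    # Forwarded with requests so the tavily-search provider can read the key.
    provider_data={"tavily_search_api_key": "<your-key>"},
)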
@@ -110,9 +110,11 @@ async def stream_and_store_openai_completion(
         logprobs_content = choice_data["logprobs_content_parts"]
         final_logprobs = OpenAIChoiceLogprobs(content=logprobs_content) if logprobs_content else None
 
+        # Ensure finish_reason is a string as required by OpenAIChoice
+        finish_reason = choice_data["finish_reason"] or "unknown"
         assembled_choices.append(
             OpenAIChoice(
-                finish_reason=choice_data["finish_reason"],
+                finish_reason=finish_reason,
                 index=choice_idx,
                 message=message,
                 logprobs=final_logprobs,
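The `or "unknown"` guard above works because both None and the empty string are falsy in Python, so a missing finish_reason is coerced to a real string before OpenAIChoice validates it:

for raw in ("stop", None, ""):
    print(raw or "unknown")
# stop
# unknown
# unknown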