fix after rebasing, now test isn't working

Honglin Cao 2025-02-04 16:59:56 -05:00
parent 102af46d5d
commit acc4d75c48
3 changed files with 17 additions and 9 deletions

@@ -17,6 +17,7 @@ from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
     CompletionRequest,
+    CompletionResponse,
     EmbeddingsResponse,
     Inference,
     LogProbConfig,
@@ -25,6 +26,7 @@ from llama_stack.apis.inference import (
     ResponseFormatType,
     SamplingParams,
     ToolChoice,
+    ToolConfig,
     ToolDefinition,
     ToolPromptFormat,
 )
@@ -42,6 +44,7 @@ from llama_stack.providers.utils.inference.openai_compat import (
     process_completion_stream_response,
 )
 from llama_stack.providers.utils.inference.prompt_adapter import (
+    chat_completion_request_to_prompt,
     completion_request_to_prompt,
     content_has_media,
     interleaved_content_as_str,
@@ -176,6 +179,7 @@ class CentMLInferenceAdapter(
         response_format: Optional[ResponseFormat] = None,
         stream: Optional[bool] = False,
         logprobs: Optional[LogProbConfig] = None,
+        tool_config: Optional[ToolConfig] = None,
     ) -> AsyncGenerator:
         """
         For "chat completion" style requests.

@@ -5,7 +5,7 @@ distribution_spec:
   providers:
     inference:
     - remote::centml
-    memory:
+    vector_io:
     - inline::faiss
     - remote::chromadb
     - remote::pgvector
@@ -28,5 +28,6 @@ distribution_spec:
     - remote::brave-search
     - remote::tavily-search
     - inline::code-interpreter
-    - inline::memory-runtime
+    - inline::rag-runtime
+    - remote::model-context-protocol
   image_type: conda
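This file renames the memory provider group to vector_io and replaces inline::memory-runtime with inline::rag-runtime, adding remote::model-context-protocol alongside it. A quick sanity check one could run against the rebuilt spec; the build.yaml path and the tool_runtime key name are assumptions from context, not shown in the hunks:

# Sketch: confirm the renamed provider keys landed after the rebase.
# "build.yaml" and the "tool_runtime" key are assumed from context.
import yaml

with open("build.yaml") as f:
    spec = yaml.safe_load(f)

providers = spec["distribution_spec"]["providers"]
assert "vector_io" in providers and "memory" not in providers
assert "inline::rag-runtime" in providers["tool_runtime"]
assert "remote::model-context-protocol" in providers["tool_runtime"]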

@@ -6,11 +6,11 @@ apis:
 - datasetio
 - eval
 - inference
-- memory
 - safety
 - scoring
 - telemetry
 - tool_runtime
+- vector_io
 providers:
   inference:
   - provider_id: centml
@@ -22,7 +22,7 @@ providers:
     provider_type: inline::sentence-transformers
     config: {}
-  memory:
+  vector_io:
   - provider_id: faiss
     provider_type: inline::faiss
     config:
@@ -92,8 +92,11 @@ providers:
   - provider_id: code-interpreter
     provider_type: inline::code-interpreter
     config: {}
-  - provider_id: memory-runtime
-    provider_type: inline::memory-runtime
+  - provider_id: rag-runtime
+    provider_type: inline::rag-runtime
+    config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
     config: {}
 metadata_store:
@@ -116,14 +119,14 @@ models:
 shields:
 - shield_id: meta-llama/Llama-Guard-3-8B
-memory_banks: []
+vector_dbs: []
 datasets: []
 scoring_fns: []
 eval_tasks: []
 tool_groups:
 - toolgroup_id: builtin::websearch
   provider_id: tavily-search
-- toolgroup_id: builtin::memory
-  provider_id: memory-runtime
+- toolgroup_id: builtin::rag
+  provider_id: rag-runtime
 - toolgroup_id: builtin::code_interpreter
   provider_id: code-interpreter