test

# What does this PR do?

Migrates `openai_chat_completion` callers, including the `ChatAgent` and `StreamingResponseOrchestrator` call sites shown in the hunks below, to construct an `OpenaiChatCompletionRequest` and pass it as a single `params` argument instead of individual keyword arguments.

## Test Plan
This commit is contained in:
parent f50ce11a3b
commit 4a3d1e33f8

31 changed files with 727 additions and 892 deletions
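Both files receive the same mechanical change: keyword arguments to `openai_chat_completion` move onto an `OpenaiChatCompletionRequest` that is passed as a single argument. Below is a minimal sketch of the pattern, assuming the one-argument signature the new call sites imply; the `stream_chat` wrapper and its parameters are illustrative, not part of the diff:

```python
from llama_stack.apis.inference import Inference, OpenaiChatCompletionRequest


async def stream_chat(inference_api: Inference, model: str, messages: list):
    # Old style (removed by this commit): options passed as individual kwargs.
    #   openai_stream = await inference_api.openai_chat_completion(
    #       model=model, messages=messages, stream=True,
    #   )

    # New style: bundle the options into a request object and pass it through.
    params = OpenaiChatCompletionRequest(
        model=model,
        messages=messages,
        stream=True,
    )
    return await inference_api.openai_chat_completion(params)
```

The commit message does not state a motivation, but a plausible one is that a single request object keeps the method signature stable as chat-completion parameters grow.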
```diff
@@ -49,6 +49,7 @@ from llama_stack.apis.inference import (
     Inference,
     Message,
     OpenAIAssistantMessageParam,
+    OpenaiChatCompletionRequest,
     OpenAIDeveloperMessageParam,
     OpenAIMessageParam,
     OpenAISystemMessageParam,
```
```diff
@@ -582,7 +583,7 @@ class ChatAgent(ShieldRunnerMixin):
         max_tokens = getattr(sampling_params, "max_tokens", None)

         # Use OpenAI chat completion
-        openai_stream = await self.inference_api.openai_chat_completion(
+        params = OpenaiChatCompletionRequest(
             model=self.agent_config.model,
             messages=openai_messages,
             tools=openai_tools if openai_tools else None,
@@ -593,6 +594,7 @@ class ChatAgent(ShieldRunnerMixin):
             max_tokens=max_tokens,
             stream=True,
         )
+        openai_stream = await self.inference_api.openai_chat_completion(params)

         # Convert OpenAI stream back to Llama Stack format
         response_stream = convert_openai_chat_completion_stream(
```
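Read together, the two hunks leave the `ChatAgent` call site looking roughly as below. This is reconstructed from the visible diff context only; the keyword arguments elided between the two hunks stay elided here:

```python
# Use OpenAI chat completion
params = OpenaiChatCompletionRequest(
    model=self.agent_config.model,
    messages=openai_messages,
    tools=openai_tools if openai_tools else None,
    # ... keyword arguments not shown in the diff context ...
    max_tokens=max_tokens,
    stream=True,
)
openai_stream = await self.inference_api.openai_chat_completion(params)
```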
```diff
@@ -41,6 +41,7 @@ from llama_stack.apis.inference import (
     Inference,
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
+    OpenaiChatCompletionRequest,
     OpenAIChatCompletionToolCall,
     OpenAIChoice,
     OpenAIMessageParam,
```
```diff
@@ -130,7 +131,7 @@ class StreamingResponseOrchestrator:
         # (some providers don't support non-empty response_format when tools are present)
         response_format = None if self.ctx.response_format.type == "text" else self.ctx.response_format
         logger.debug(f"calling openai_chat_completion with tools: {self.ctx.chat_tools}")
-        completion_result = await self.inference_api.openai_chat_completion(
+        params = OpenaiChatCompletionRequest(
             model=self.ctx.model,
             messages=messages,
             tools=self.ctx.chat_tools,
@@ -138,6 +139,7 @@ class StreamingResponseOrchestrator:
             temperature=self.ctx.temperature,
             response_format=response_format,
         )
+        completion_result = await self.inference_api.openai_chat_completion(params)

         # Process streaming chunks and build complete response
         completion_result_data = None
```
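Similarly, the orchestrator's call site after this change, reconstructed from the two hunks above with the elided arguments left as a placeholder comment:

```python
# (some providers don't support non-empty response_format when tools are present)
response_format = None if self.ctx.response_format.type == "text" else self.ctx.response_format
logger.debug(f"calling openai_chat_completion with tools: {self.ctx.chat_tools}")
params = OpenaiChatCompletionRequest(
    model=self.ctx.model,
    messages=messages,
    tools=self.ctx.chat_tools,
    # ... keyword arguments not shown in the diff context ...
    temperature=self.ctx.temperature,
    response_format=response_format,
)
completion_result = await self.inference_api.openai_chat_completion(params)
```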