Merge branch 'main' into tests_docs

Ashwin Bharambe, 2025-08-14 21:20:19 -07:00 (committed by GitHub)
commit 053ca90ce6
106 changed files with 8929 additions and 7614 deletions


@@ -5,7 +5,7 @@ run-name: Run the integration test suite from tests/integration in replay mode
 on:
   push:
     branches: [ main ]
-  pull_request_target:
+  pull_request:
     branches: [ main ]
     types: [opened, synchronize, reopened]
     paths:
@@ -34,7 +34,7 @@ on:
 concurrency:
   # Skip concurrency for pushes to main - each commit should be tested independently
-  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.event.pull_request.number }}
+  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
   cancel-in-progress: true

 jobs:
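Note: with the trigger now plain pull_request, the new group key gives each push to main a unique group (github.run_id), so mainline commits are never cancelled, while any other ref shares a group keyed by github.ref and a newer push cancels the in-flight run via cancel-in-progress.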


@@ -14,9 +14,11 @@ on:
       - 'pyproject.toml'
       - 'requirements.txt'
       - '.github/workflows/integration-vector-io-tests.yml' # This workflow
+  schedule:
+    - cron: '0 0 * * *' # (test on python 3.13) Daily at 12 AM UTC

 concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
+  group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
   cancel-in-progress: true

 jobs:
@@ -25,7 +27,7 @@ jobs:
     strategy:
       matrix:
         vector-io-provider: ["inline::faiss", "inline::sqlite-vec", "inline::milvus", "remote::chromadb", "remote::pgvector", "remote::weaviate", "remote::qdrant"]
-        python-version: ["3.12", "3.13"]
+        python-version: ${{ github.event.schedule == '0 0 * * *' && fromJSON('["3.12", "3.13"]') || fromJSON('["3.12"]') }}
       fail-fast: false # we want to run all tests regardless of failure

     steps:
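Note: matrix values must be arrays, hence the fromJSON calls; the expression picks the matrix at evaluation time, so the nightly scheduled run (when github.event.schedule matches the cron above) tests both Python 3.12 and 3.13, while pushes and pull requests test only 3.12.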


@@ -3,7 +3,7 @@ name: Integration Tests (Record)

 run-name: Run the integration test suite from tests/integration

 on:
-  pull_request:
+  pull_request_target:
     branches: [ main ]
     types: [opened, synchronize, labeled]
     paths:
@@ -23,7 +23,7 @@ on:
         default: 'ollama'

 concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number }}
   cancel-in-progress: true

 jobs:
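Note: the record workflow moves to pull_request_target (which runs the workflow definition from the base branch and has access to repository secrets), the mirror image of the replay workflow's move to plain pull_request; since github.ref is not a PR merge ref for this event, its concurrency group is keyed by the PR number instead.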


@@ -2,6 +2,7 @@ exclude: 'build/'

 default_language_version:
   python: python3.12
+  node: "22"

 repos:
 - repo: https://github.com/pre-commit/pre-commit-hooks
@@ -145,6 +146,20 @@ repos:
       pass_filenames: false
       require_serial: true
       files: ^.github/workflows/.*$
+    - id: ui-prettier
+      name: Format UI code with Prettier
+      entry: bash -c 'cd llama_stack/ui && npm run format'
+      language: system
+      files: ^llama_stack/ui/.*\.(ts|tsx)$
+      pass_filenames: false
+      require_serial: true
+    - id: ui-eslint
+      name: Lint UI code with ESLint
+      entry: bash -c 'cd llama_stack/ui && npm run lint -- --fix --quiet'
+      language: system
+      files: ^llama_stack/ui/.*\.(ts|tsx)$
+      pass_filenames: false
+      require_serial: true

 ci:
   autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks
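Note: both new hooks run with language: system, so pre-commit does not provision Node for them; they assume a local Node toolchain with the UI's npm dependencies already installed (the .nvmrc added below pins 22.5.1). Either hook can be exercised in isolation with, e.g., pre-commit run ui-prettier --all-files.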


@@ -1,8 +1,5 @@
 # Llama Stack

-<a href="https://trendshift.io/repositories/11824" target="_blank"><img src="https://trendshift.io/api/badge/repositories/11824" alt="meta-llama%2Fllama-stack | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
-
------
 [![PyPI version](https://img.shields.io/pypi/v/llama_stack.svg)](https://pypi.org/project/llama_stack/)
 [![PyPI - Downloads](https://img.shields.io/pypi/dm/llama-stack)](https://pypi.org/project/llama-stack/)
 [![License](https://img.shields.io/pypi/l/llama_stack.svg)](https://github.com/meta-llama/llama-stack/blob/main/LICENSE)


@@ -48,8 +48,8 @@ from llama_stack.providers.utils.responses.responses_store import ResponsesStore
 from .agent_instance import ChatAgent
 from .config import MetaReferenceAgentsImplConfig
-from .openai_responses import OpenAIResponsesImpl
 from .persistence import AgentInfo
+from .responses.openai_responses import OpenAIResponsesImpl

 logger = logging.getLogger()


@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.


@@ -0,0 +1,271 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import time
import uuid
from collections.abc import AsyncIterator
from pydantic import BaseModel
from llama_stack.apis.agents import Order
from llama_stack.apis.agents.openai_responses import (
ListOpenAIResponseInputItem,
ListOpenAIResponseObject,
OpenAIDeleteResponseObject,
OpenAIResponseInput,
OpenAIResponseInputMessageContentText,
OpenAIResponseInputTool,
OpenAIResponseMessage,
OpenAIResponseObject,
OpenAIResponseObjectStream,
OpenAIResponseText,
OpenAIResponseTextFormat,
)
from llama_stack.apis.inference import (
Inference,
OpenAISystemMessageParam,
)
from llama_stack.apis.tools import ToolGroups, ToolRuntime
from llama_stack.apis.vector_io import VectorIO
from llama_stack.log import get_logger
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
from .streaming import StreamingResponseOrchestrator
from .tool_executor import ToolExecutor
from .types import ChatCompletionContext
from .utils import (
convert_response_input_to_chat_messages,
convert_response_text_to_chat_response_format,
)
logger = get_logger(name=__name__, category="responses")
class OpenAIResponsePreviousResponseWithInputItems(BaseModel):
input_items: ListOpenAIResponseInputItem
response: OpenAIResponseObject
class OpenAIResponsesImpl:
def __init__(
self,
inference_api: Inference,
tool_groups_api: ToolGroups,
tool_runtime_api: ToolRuntime,
responses_store: ResponsesStore,
vector_io_api: VectorIO,
):
self.inference_api = inference_api
self.tool_groups_api = tool_groups_api
self.tool_runtime_api = tool_runtime_api
self.responses_store = responses_store
self.vector_io_api = vector_io_api
self.tool_executor = ToolExecutor(
tool_groups_api=tool_groups_api,
tool_runtime_api=tool_runtime_api,
vector_io_api=vector_io_api,
)
async def _prepend_previous_response(
self,
input: str | list[OpenAIResponseInput],
previous_response_id: str | None = None,
):
if previous_response_id:
previous_response_with_input = await self.responses_store.get_response_object(previous_response_id)
# previous response input items
new_input_items = previous_response_with_input.input
# previous response output items
new_input_items.extend(previous_response_with_input.output)
# new input items from the current request
if isinstance(input, str):
new_input_items.append(OpenAIResponseMessage(content=input, role="user"))
else:
new_input_items.extend(input)
input = new_input_items
return input
async def _prepend_instructions(self, messages, instructions):
if instructions:
messages.insert(0, OpenAISystemMessageParam(content=instructions))
async def get_openai_response(
self,
response_id: str,
) -> OpenAIResponseObject:
response_with_input = await self.responses_store.get_response_object(response_id)
return OpenAIResponseObject(**{k: v for k, v in response_with_input.model_dump().items() if k != "input"})
async def list_openai_responses(
self,
after: str | None = None,
limit: int | None = 50,
model: str | None = None,
order: Order | None = Order.desc,
) -> ListOpenAIResponseObject:
return await self.responses_store.list_responses(after, limit, model, order)
async def list_openai_response_input_items(
self,
response_id: str,
after: str | None = None,
before: str | None = None,
include: list[str] | None = None,
limit: int | None = 20,
order: Order | None = Order.desc,
) -> ListOpenAIResponseInputItem:
"""List input items for a given OpenAI response.
:param response_id: The ID of the response to retrieve input items for.
:param after: An item ID to list items after, used for pagination.
:param before: An item ID to list items before, used for pagination.
:param include: Additional fields to include in the response.
:param limit: A limit on the number of objects to be returned.
:param order: The order to return the input items in.
:returns: A ListOpenAIResponseInputItem.
"""
return await self.responses_store.list_response_input_items(response_id, after, before, include, limit, order)
async def _store_response(
self,
response: OpenAIResponseObject,
input: str | list[OpenAIResponseInput],
) -> None:
new_input_id = f"msg_{uuid.uuid4()}"
if isinstance(input, str):
# synthesize a message from the input string
input_content = OpenAIResponseInputMessageContentText(text=input)
input_content_item = OpenAIResponseMessage(
role="user",
content=[input_content],
id=new_input_id,
)
input_items_data = [input_content_item]
else:
# we already have a list of messages
input_items_data = []
for input_item in input:
if isinstance(input_item, OpenAIResponseMessage):
# These may or may not already have an id, so dump to dict, check for id, and add if missing
input_item_dict = input_item.model_dump()
if "id" not in input_item_dict:
input_item_dict["id"] = new_input_id
input_items_data.append(OpenAIResponseMessage(**input_item_dict))
else:
input_items_data.append(input_item)
await self.responses_store.store_response_object(
response_object=response,
input=input_items_data,
)
async def create_openai_response(
self,
input: str | list[OpenAIResponseInput],
model: str,
instructions: str | None = None,
previous_response_id: str | None = None,
store: bool | None = True,
stream: bool | None = False,
temperature: float | None = None,
text: OpenAIResponseText | None = None,
tools: list[OpenAIResponseInputTool] | None = None,
include: list[str] | None = None,
max_infer_iters: int | None = 10,
):
stream = bool(stream)
text = OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")) if text is None else text
stream_gen = self._create_streaming_response(
input=input,
model=model,
instructions=instructions,
previous_response_id=previous_response_id,
store=store,
temperature=temperature,
text=text,
tools=tools,
max_infer_iters=max_infer_iters,
)
if stream:
return stream_gen
else:
response = None
async for stream_chunk in stream_gen:
if stream_chunk.type == "response.completed":
if response is not None:
raise ValueError("The response stream completed multiple times! Earlier response: {response}")
response = stream_chunk.response
# don't leave the generator half complete!
if response is None:
raise ValueError("The response stream never completed")
return response
async def _create_streaming_response(
self,
input: str | list[OpenAIResponseInput],
model: str,
instructions: str | None = None,
previous_response_id: str | None = None,
store: bool | None = True,
temperature: float | None = None,
text: OpenAIResponseText | None = None,
tools: list[OpenAIResponseInputTool] | None = None,
max_infer_iters: int | None = 10,
) -> AsyncIterator[OpenAIResponseObjectStream]:
# Input preprocessing
input = await self._prepend_previous_response(input, previous_response_id)
messages = await convert_response_input_to_chat_messages(input)
await self._prepend_instructions(messages, instructions)
# Structured outputs
response_format = await convert_response_text_to_chat_response_format(text)
ctx = ChatCompletionContext(
model=model,
messages=messages,
response_tools=tools,
temperature=temperature,
response_format=response_format,
)
# Create orchestrator and delegate streaming logic
response_id = f"resp-{uuid.uuid4()}"
created_at = int(time.time())
orchestrator = StreamingResponseOrchestrator(
inference_api=self.inference_api,
ctx=ctx,
response_id=response_id,
created_at=created_at,
text=text,
max_infer_iters=max_infer_iters,
tool_executor=self.tool_executor,
)
# Stream the response
final_response = None
async for stream_chunk in orchestrator.create_response():
if stream_chunk.type == "response.completed":
final_response = stream_chunk.response
yield stream_chunk
# Store the response if requested
if store and final_response:
await self._store_response(
response=final_response,
input=input,
)
async def delete_openai_response(self, response_id: str) -> OpenAIDeleteResponseObject:
return await self.responses_store.delete_response_object(response_id)
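
For orientation, a minimal usage sketch of the class above. This is hypothetical wiring, not part of the diff: it assumes `impl` is an OpenAIResponsesImpl constructed with real inference, tool, vector-io, and responses-store dependencies, and that "some-model" is a valid model id on that stack.

# Hypothetical sketch (assumes a fully wired OpenAIResponsesImpl).
async def demo(impl: OpenAIResponsesImpl) -> None:
    # Non-streaming: create_openai_response drains its own stream generator
    # internally and returns the final OpenAIResponseObject.
    response = await impl.create_openai_response(input="Hello", model="some-model")
    print(response.status)  # "completed"

    # Streaming: the same call returns the async generator directly.
    stream = await impl.create_openai_response(
        input="Hello", model="some-model", stream=True
    )
    async for chunk in stream:
        if chunk.type == "response.completed":
            print(chunk.response.id)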


@@ -0,0 +1,634 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import uuid
from collections.abc import AsyncIterator
from typing import Any
from llama_stack.apis.agents.openai_responses import (
AllowedToolsFilter,
MCPListToolsTool,
OpenAIResponseContentPartOutputText,
OpenAIResponseInputTool,
OpenAIResponseInputToolMCP,
OpenAIResponseObject,
OpenAIResponseObjectStream,
OpenAIResponseObjectStreamResponseCompleted,
OpenAIResponseObjectStreamResponseContentPartAdded,
OpenAIResponseObjectStreamResponseContentPartDone,
OpenAIResponseObjectStreamResponseCreated,
OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta,
OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone,
OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta,
OpenAIResponseObjectStreamResponseMcpCallArgumentsDone,
OpenAIResponseObjectStreamResponseMcpListToolsCompleted,
OpenAIResponseObjectStreamResponseMcpListToolsInProgress,
OpenAIResponseObjectStreamResponseOutputItemAdded,
OpenAIResponseObjectStreamResponseOutputItemDone,
OpenAIResponseObjectStreamResponseOutputTextDelta,
OpenAIResponseOutput,
OpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseOutputMessageMCPListTools,
OpenAIResponseText,
WebSearchToolTypes,
)
from llama_stack.apis.inference import (
Inference,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChatCompletionToolCall,
OpenAIChoice,
)
from llama_stack.log import get_logger
from .types import ChatCompletionContext, ChatCompletionResult
from .utils import convert_chat_choice_to_response_message, is_function_tool_call
logger = get_logger(name=__name__, category="responses")
class StreamingResponseOrchestrator:
def __init__(
self,
inference_api: Inference,
ctx: ChatCompletionContext,
response_id: str,
created_at: int,
text: OpenAIResponseText,
max_infer_iters: int,
tool_executor, # Will be the tool execution logic from the main class
):
self.inference_api = inference_api
self.ctx = ctx
self.response_id = response_id
self.created_at = created_at
self.text = text
self.max_infer_iters = max_infer_iters
self.tool_executor = tool_executor
self.sequence_number = 0
# Store MCP tool mapping that gets built during tool processing
self.mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP] = {}
async def create_response(self) -> AsyncIterator[OpenAIResponseObjectStream]:
# Initialize output messages
output_messages: list[OpenAIResponseOutput] = []
# Create initial response and emit response.created immediately
initial_response = OpenAIResponseObject(
created_at=self.created_at,
id=self.response_id,
model=self.ctx.model,
object="response",
status="in_progress",
output=output_messages.copy(),
text=self.text,
)
yield OpenAIResponseObjectStreamResponseCreated(response=initial_response)
# Process all tools (including MCP tools) and emit streaming events
if self.ctx.response_tools:
async for stream_event in self._process_tools(self.ctx.response_tools, output_messages):
yield stream_event
n_iter = 0
messages = self.ctx.messages.copy()
while True:
completion_result = await self.inference_api.openai_chat_completion(
model=self.ctx.model,
messages=messages,
tools=self.ctx.chat_tools,
stream=True,
temperature=self.ctx.temperature,
response_format=self.ctx.response_format,
)
# Process streaming chunks and build complete response
completion_result_data = None
async for stream_event_or_result in self._process_streaming_chunks(completion_result, output_messages):
if isinstance(stream_event_or_result, ChatCompletionResult):
completion_result_data = stream_event_or_result
else:
yield stream_event_or_result
if not completion_result_data:
raise ValueError("Streaming chunk processor failed to return completion data")
current_response = self._build_chat_completion(completion_result_data)
function_tool_calls, non_function_tool_calls, next_turn_messages = self._separate_tool_calls(
current_response, messages
)
# Handle choices with no tool calls
for choice in current_response.choices:
if not (choice.message.tool_calls and self.ctx.response_tools):
output_messages.append(await convert_chat_choice_to_response_message(choice))
# Execute tool calls and coordinate results
async for stream_event in self._coordinate_tool_execution(
function_tool_calls,
non_function_tool_calls,
completion_result_data,
output_messages,
next_turn_messages,
):
yield stream_event
if not function_tool_calls and not non_function_tool_calls:
break
if function_tool_calls:
logger.info("Exiting inference loop since there is a function (client-side) tool call")
break
n_iter += 1
if n_iter >= self.max_infer_iters:
logger.info(f"Exiting inference loop since iteration count({n_iter}) exceeds {self.max_infer_iters=}")
break
messages = next_turn_messages
# Create final response
final_response = OpenAIResponseObject(
created_at=self.created_at,
id=self.response_id,
model=self.ctx.model,
object="response",
status="completed",
text=self.text,
output=output_messages,
)
# Emit response.completed
yield OpenAIResponseObjectStreamResponseCompleted(response=final_response)
def _separate_tool_calls(self, current_response, messages) -> tuple[list, list, list]:
"""Separate tool calls into function and non-function categories."""
function_tool_calls = []
non_function_tool_calls = []
next_turn_messages = messages.copy()
for choice in current_response.choices:
next_turn_messages.append(choice.message)
if choice.message.tool_calls and self.ctx.response_tools:
for tool_call in choice.message.tool_calls:
if is_function_tool_call(tool_call, self.ctx.response_tools):
function_tool_calls.append(tool_call)
else:
non_function_tool_calls.append(tool_call)
return function_tool_calls, non_function_tool_calls, next_turn_messages
async def _process_streaming_chunks(
self, completion_result, output_messages: list[OpenAIResponseOutput]
) -> AsyncIterator[OpenAIResponseObjectStream | ChatCompletionResult]:
"""Process streaming chunks and emit events, returning completion data."""
# Initialize result tracking
chat_response_id = ""
chat_response_content = []
chat_response_tool_calls: dict[int, OpenAIChatCompletionToolCall] = {}
chunk_created = 0
chunk_model = ""
chunk_finish_reason = ""
# Create a placeholder message item for delta events
message_item_id = f"msg_{uuid.uuid4()}"
# Track tool call items for streaming events
tool_call_item_ids: dict[int, str] = {}
# Track content parts for streaming events
content_part_emitted = False
async for chunk in completion_result:
chat_response_id = chunk.id
chunk_created = chunk.created
chunk_model = chunk.model
for chunk_choice in chunk.choices:
# Emit incremental text content as delta events
if chunk_choice.delta.content:
# Emit content_part.added event for first text chunk
if not content_part_emitted:
content_part_emitted = True
self.sequence_number += 1
yield OpenAIResponseObjectStreamResponseContentPartAdded(
response_id=self.response_id,
item_id=message_item_id,
part=OpenAIResponseContentPartOutputText(
text="", # Will be filled incrementally via text deltas
),
sequence_number=self.sequence_number,
)
self.sequence_number += 1
yield OpenAIResponseObjectStreamResponseOutputTextDelta(
content_index=0,
delta=chunk_choice.delta.content,
item_id=message_item_id,
output_index=0,
sequence_number=self.sequence_number,
)
# Collect content for final response
chat_response_content.append(chunk_choice.delta.content or "")
if chunk_choice.finish_reason:
chunk_finish_reason = chunk_choice.finish_reason
# Aggregate tool call arguments across chunks
if chunk_choice.delta.tool_calls:
for tool_call in chunk_choice.delta.tool_calls:
response_tool_call = chat_response_tool_calls.get(tool_call.index, None)
# Create new tool call entry if this is the first chunk for this index
is_new_tool_call = response_tool_call is None
if is_new_tool_call:
tool_call_dict: dict[str, Any] = tool_call.model_dump()
tool_call_dict.pop("type", None)
response_tool_call = OpenAIChatCompletionToolCall(**tool_call_dict)
chat_response_tool_calls[tool_call.index] = response_tool_call
# Create item ID for this tool call for streaming events
tool_call_item_id = f"fc_{uuid.uuid4()}"
tool_call_item_ids[tool_call.index] = tool_call_item_id
# Emit output_item.added event for the new function call
self.sequence_number += 1
function_call_item = OpenAIResponseOutputMessageFunctionToolCall(
arguments="", # Will be filled incrementally via delta events
call_id=tool_call.id or "",
name=tool_call.function.name if tool_call.function else "",
id=tool_call_item_id,
status="in_progress",
)
yield OpenAIResponseObjectStreamResponseOutputItemAdded(
response_id=self.response_id,
item=function_call_item,
output_index=len(output_messages),
sequence_number=self.sequence_number,
)
# Stream tool call arguments as they arrive (differentiate between MCP and function calls)
if tool_call.function and tool_call.function.arguments:
tool_call_item_id = tool_call_item_ids[tool_call.index]
self.sequence_number += 1
# Check if this is an MCP tool call
is_mcp_tool = tool_call.function.name and tool_call.function.name in self.mcp_tool_to_server
if is_mcp_tool:
# Emit MCP-specific argument delta event
yield OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta(
delta=tool_call.function.arguments,
item_id=tool_call_item_id,
output_index=len(output_messages),
sequence_number=self.sequence_number,
)
else:
# Emit function call argument delta event
yield OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta(
delta=tool_call.function.arguments,
item_id=tool_call_item_id,
output_index=len(output_messages),
sequence_number=self.sequence_number,
)
# Accumulate arguments for final response (only for subsequent chunks)
if not is_new_tool_call:
response_tool_call.function.arguments = (
response_tool_call.function.arguments or ""
) + tool_call.function.arguments
# Emit arguments.done events for completed tool calls (differentiate between MCP and function calls)
for tool_call_index in sorted(chat_response_tool_calls.keys()):
tool_call_item_id = tool_call_item_ids[tool_call_index]
final_arguments = chat_response_tool_calls[tool_call_index].function.arguments or ""
tool_call_name = chat_response_tool_calls[tool_call_index].function.name
# Check if this is an MCP tool call
is_mcp_tool = tool_call_name and tool_call_name in self.mcp_tool_to_server
self.sequence_number += 1
done_event_cls = (
OpenAIResponseObjectStreamResponseMcpCallArgumentsDone
if is_mcp_tool
else OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone
)
yield done_event_cls(
arguments=final_arguments,
item_id=tool_call_item_id,
output_index=len(output_messages),
sequence_number=self.sequence_number,
)
# Emit content_part.done event if text content was streamed (before content gets cleared)
if content_part_emitted:
final_text = "".join(chat_response_content)
self.sequence_number += 1
yield OpenAIResponseObjectStreamResponseContentPartDone(
response_id=self.response_id,
item_id=message_item_id,
part=OpenAIResponseContentPartOutputText(
text=final_text,
),
sequence_number=self.sequence_number,
)
# Clear content when there are tool calls (OpenAI spec behavior)
if chat_response_tool_calls:
chat_response_content = []
yield ChatCompletionResult(
response_id=chat_response_id,
content=chat_response_content,
tool_calls=chat_response_tool_calls,
created=chunk_created,
model=chunk_model,
finish_reason=chunk_finish_reason,
message_item_id=message_item_id,
tool_call_item_ids=tool_call_item_ids,
content_part_emitted=content_part_emitted,
)
def _build_chat_completion(self, result: ChatCompletionResult) -> OpenAIChatCompletion:
"""Build OpenAIChatCompletion from ChatCompletionResult."""
# Convert collected chunks to complete response
if result.tool_calls:
tool_calls = [result.tool_calls[i] for i in sorted(result.tool_calls.keys())]
else:
tool_calls = None
assistant_message = OpenAIAssistantMessageParam(
content=result.content_text,
tool_calls=tool_calls,
)
return OpenAIChatCompletion(
id=result.response_id,
choices=[
OpenAIChoice(
message=assistant_message,
finish_reason=result.finish_reason,
index=0,
)
],
created=result.created,
model=result.model,
)
async def _coordinate_tool_execution(
self,
function_tool_calls: list,
non_function_tool_calls: list,
completion_result_data: ChatCompletionResult,
output_messages: list[OpenAIResponseOutput],
next_turn_messages: list,
) -> AsyncIterator[OpenAIResponseObjectStream]:
"""Coordinate execution of both function and non-function tool calls."""
# Execute non-function tool calls
for tool_call in non_function_tool_calls:
# Find the item_id for this tool call
matching_item_id = None
for index, item_id in completion_result_data.tool_call_item_ids.items():
response_tool_call = completion_result_data.tool_calls.get(index)
if response_tool_call and response_tool_call.id == tool_call.id:
matching_item_id = item_id
break
# Use a fallback item_id if not found
if not matching_item_id:
matching_item_id = f"tc_{uuid.uuid4()}"
# Execute tool call with streaming
tool_call_log = None
tool_response_message = None
async for result in self.tool_executor.execute_tool_call(
tool_call,
self.ctx,
self.sequence_number,
len(output_messages),
matching_item_id,
self.mcp_tool_to_server,
):
if result.stream_event:
# Forward streaming events
self.sequence_number = result.sequence_number
yield result.stream_event
if result.final_output_message is not None:
tool_call_log = result.final_output_message
tool_response_message = result.final_input_message
self.sequence_number = result.sequence_number
if tool_call_log:
output_messages.append(tool_call_log)
# Emit output_item.done event for completed non-function tool call
if matching_item_id:
self.sequence_number += 1
yield OpenAIResponseObjectStreamResponseOutputItemDone(
response_id=self.response_id,
item=tool_call_log,
output_index=len(output_messages) - 1,
sequence_number=self.sequence_number,
)
if tool_response_message:
next_turn_messages.append(tool_response_message)
# Execute function tool calls (client-side)
for tool_call in function_tool_calls:
# Find the item_id for this tool call from our tracking dictionary
matching_item_id = None
for index, item_id in completion_result_data.tool_call_item_ids.items():
response_tool_call = completion_result_data.tool_calls.get(index)
if response_tool_call and response_tool_call.id == tool_call.id:
matching_item_id = item_id
break
# Use existing item_id or create new one if not found
final_item_id = matching_item_id or f"fc_{uuid.uuid4()}"
function_call_item = OpenAIResponseOutputMessageFunctionToolCall(
arguments=tool_call.function.arguments or "",
call_id=tool_call.id,
name=tool_call.function.name or "",
id=final_item_id,
status="completed",
)
output_messages.append(function_call_item)
# Emit output_item.done event for completed function call
self.sequence_number += 1
yield OpenAIResponseObjectStreamResponseOutputItemDone(
response_id=self.response_id,
item=function_call_item,
output_index=len(output_messages) - 1,
sequence_number=self.sequence_number,
)
async def _process_tools(
self, tools: list[OpenAIResponseInputTool], output_messages: list[OpenAIResponseOutput]
) -> AsyncIterator[OpenAIResponseObjectStream]:
"""Process all tools and emit appropriate streaming events."""
from openai.types.chat import ChatCompletionToolParam
from llama_stack.apis.tools import Tool
from llama_stack.models.llama.datatypes import ToolDefinition, ToolParamDefinition
from llama_stack.providers.utils.inference.openai_compat import convert_tooldef_to_openai_tool
def make_openai_tool(tool_name: str, tool: Tool) -> ChatCompletionToolParam:
tool_def = ToolDefinition(
tool_name=tool_name,
description=tool.description,
parameters={
param.name: ToolParamDefinition(
param_type=param.parameter_type,
description=param.description,
required=param.required,
default=param.default,
)
for param in tool.parameters
},
)
return convert_tooldef_to_openai_tool(tool_def)
# Initialize chat_tools if not already set
if self.ctx.chat_tools is None:
self.ctx.chat_tools = []
for input_tool in tools:
if input_tool.type == "function":
self.ctx.chat_tools.append(ChatCompletionToolParam(type="function", function=input_tool.model_dump()))
elif input_tool.type in WebSearchToolTypes:
tool_name = "web_search"
# Need to access tool_groups_api from tool_executor
tool = await self.tool_executor.tool_groups_api.get_tool(tool_name)
if not tool:
raise ValueError(f"Tool {tool_name} not found")
self.ctx.chat_tools.append(make_openai_tool(tool_name, tool))
elif input_tool.type == "file_search":
tool_name = "knowledge_search"
tool = await self.tool_executor.tool_groups_api.get_tool(tool_name)
if not tool:
raise ValueError(f"Tool {tool_name} not found")
self.ctx.chat_tools.append(make_openai_tool(tool_name, tool))
elif input_tool.type == "mcp":
async for stream_event in self._process_mcp_tool(input_tool, output_messages):
yield stream_event
else:
raise ValueError(f"Llama Stack OpenAI Responses does not yet support tool type: {input_tool.type}")
async def _process_mcp_tool(
self, mcp_tool: OpenAIResponseInputToolMCP, output_messages: list[OpenAIResponseOutput]
) -> AsyncIterator[OpenAIResponseObjectStream]:
"""Process an MCP tool configuration and emit appropriate streaming events."""
from llama_stack.providers.utils.tools.mcp import list_mcp_tools
# Emit mcp_list_tools.in_progress
self.sequence_number += 1
yield OpenAIResponseObjectStreamResponseMcpListToolsInProgress(
sequence_number=self.sequence_number,
)
try:
# Parse allowed/never allowed tools
always_allowed = None
never_allowed = None
if mcp_tool.allowed_tools:
if isinstance(mcp_tool.allowed_tools, list):
always_allowed = mcp_tool.allowed_tools
elif isinstance(mcp_tool.allowed_tools, AllowedToolsFilter):
always_allowed = mcp_tool.allowed_tools.always
never_allowed = mcp_tool.allowed_tools.never
# Call list_mcp_tools
tool_defs = await list_mcp_tools(
endpoint=mcp_tool.server_url,
headers=mcp_tool.headers or {},
)
# Create the MCP list tools message
mcp_list_message = OpenAIResponseOutputMessageMCPListTools(
id=f"mcp_list_{uuid.uuid4()}",
server_label=mcp_tool.server_label,
tools=[],
)
# Process tools and update context
for t in tool_defs.data:
if never_allowed and t.name in never_allowed:
continue
if not always_allowed or t.name in always_allowed:
# Add to chat tools for inference
from llama_stack.models.llama.datatypes import ToolDefinition, ToolParamDefinition
from llama_stack.providers.utils.inference.openai_compat import convert_tooldef_to_openai_tool
tool_def = ToolDefinition(
tool_name=t.name,
description=t.description,
parameters={
param.name: ToolParamDefinition(
param_type=param.parameter_type,
description=param.description,
required=param.required,
default=param.default,
)
for param in t.parameters
},
)
openai_tool = convert_tooldef_to_openai_tool(tool_def)
if self.ctx.chat_tools is None:
self.ctx.chat_tools = []
self.ctx.chat_tools.append(openai_tool)
# Add to MCP tool mapping
if t.name in self.mcp_tool_to_server:
raise ValueError(f"Duplicate tool name {t.name} found for server {mcp_tool.server_label}")
self.mcp_tool_to_server[t.name] = mcp_tool
# Add to MCP list message
mcp_list_message.tools.append(
MCPListToolsTool(
name=t.name,
description=t.description,
input_schema={
"type": "object",
"properties": {
p.name: {
"type": p.parameter_type,
"description": p.description,
}
for p in t.parameters
},
"required": [p.name for p in t.parameters if p.required],
},
)
)
# Add the MCP list message to output
output_messages.append(mcp_list_message)
# Emit output_item.added for the MCP list tools message
self.sequence_number += 1
yield OpenAIResponseObjectStreamResponseOutputItemAdded(
response_id=self.response_id,
item=mcp_list_message,
output_index=len(output_messages) - 1,
sequence_number=self.sequence_number,
)
# Emit mcp_list_tools.completed
self.sequence_number += 1
yield OpenAIResponseObjectStreamResponseMcpListToolsCompleted(
sequence_number=self.sequence_number,
)
# Emit output_item.done for the MCP list tools message
self.sequence_number += 1
yield OpenAIResponseObjectStreamResponseOutputItemDone(
response_id=self.response_id,
item=mcp_list_message,
output_index=len(output_messages) - 1,
sequence_number=self.sequence_number,
)
except Exception as e:
# TODO: Emit mcp_list_tools.failed event if needed
logger.exception(f"Failed to list MCP tools from {mcp_tool.server_url}: {e}")
raise
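
In the common text-only case, the orchestrator above emits events in this order: response.created, then content_part.added before the first text chunk, one response.output_text.delta per chunk, content_part.done carrying the joined text, and finally response.completed. Tool calls additionally interleave output_item.added, argument delta/done events (MCP- or function-flavored depending on mcp_tool_to_server), and output_item.done.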


@@ -0,0 +1,379 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import asyncio
import json
from collections.abc import AsyncIterator
from typing import Any
from llama_stack.apis.agents.openai_responses import (
OpenAIResponseInputToolFileSearch,
OpenAIResponseInputToolMCP,
OpenAIResponseObjectStreamResponseMcpCallCompleted,
OpenAIResponseObjectStreamResponseMcpCallFailed,
OpenAIResponseObjectStreamResponseMcpCallInProgress,
OpenAIResponseObjectStreamResponseWebSearchCallCompleted,
OpenAIResponseObjectStreamResponseWebSearchCallInProgress,
OpenAIResponseObjectStreamResponseWebSearchCallSearching,
OpenAIResponseOutputMessageFileSearchToolCall,
OpenAIResponseOutputMessageFileSearchToolCallResults,
OpenAIResponseOutputMessageWebSearchToolCall,
)
from llama_stack.apis.common.content_types import (
ImageContentItem,
TextContentItem,
)
from llama_stack.apis.inference import (
OpenAIChatCompletionContentPartImageParam,
OpenAIChatCompletionContentPartTextParam,
OpenAIChatCompletionToolCall,
OpenAIImageURL,
OpenAIToolMessageParam,
)
from llama_stack.apis.tools import ToolGroups, ToolInvocationResult, ToolRuntime
from llama_stack.apis.vector_io import VectorIO
from llama_stack.log import get_logger
from .types import ChatCompletionContext, ToolExecutionResult
logger = get_logger(name=__name__, category="responses")
class ToolExecutor:
def __init__(
self,
tool_groups_api: ToolGroups,
tool_runtime_api: ToolRuntime,
vector_io_api: VectorIO,
):
self.tool_groups_api = tool_groups_api
self.tool_runtime_api = tool_runtime_api
self.vector_io_api = vector_io_api
async def execute_tool_call(
self,
tool_call: OpenAIChatCompletionToolCall,
ctx: ChatCompletionContext,
sequence_number: int,
output_index: int,
item_id: str,
mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP] | None = None,
) -> AsyncIterator[ToolExecutionResult]:
tool_call_id = tool_call.id
function = tool_call.function
# Validate the call before touching its arguments
if not function or not tool_call_id or not function.name:
yield ToolExecutionResult(sequence_number=sequence_number)
return
tool_kwargs = json.loads(function.arguments) if function.arguments else {}
# Emit progress events for tool execution start
async for event_result in self._emit_progress_events(
function.name, ctx, sequence_number, output_index, item_id, mcp_tool_to_server
):
sequence_number = event_result.sequence_number
yield event_result
# Execute the actual tool call
error_exc, result = await self._execute_tool(function.name, tool_kwargs, ctx, mcp_tool_to_server)
# Emit completion events for tool execution
has_error = error_exc or (result and ((result.error_code and result.error_code > 0) or result.error_message))
async for event_result in self._emit_completion_events(
function.name, ctx, sequence_number, output_index, item_id, has_error, mcp_tool_to_server
):
sequence_number = event_result.sequence_number
yield event_result
# Build result messages from tool execution
output_message, input_message = await self._build_result_messages(
function, tool_call_id, tool_kwargs, ctx, error_exc, result, has_error, mcp_tool_to_server
)
# Yield the final result
yield ToolExecutionResult(
sequence_number=sequence_number, final_output_message=output_message, final_input_message=input_message
)
async def _execute_knowledge_search_via_vector_store(
self,
query: str,
response_file_search_tool: OpenAIResponseInputToolFileSearch,
) -> ToolInvocationResult:
"""Execute knowledge search using vector_stores.search API with filters support."""
search_results = []
# Create search tasks for all vector stores
async def search_single_store(vector_store_id):
try:
search_response = await self.vector_io_api.openai_search_vector_store(
vector_store_id=vector_store_id,
query=query,
filters=response_file_search_tool.filters,
max_num_results=response_file_search_tool.max_num_results,
ranking_options=response_file_search_tool.ranking_options,
rewrite_query=False,
)
return search_response.data
except Exception as e:
logger.warning(f"Failed to search vector store {vector_store_id}: {e}")
return []
# Run all searches in parallel using gather
search_tasks = [search_single_store(vid) for vid in response_file_search_tool.vector_store_ids]
all_results = await asyncio.gather(*search_tasks)
# Flatten results
for results in all_results:
search_results.extend(results)
# Convert search results to tool result format matching memory.py
# Format the results as interleaved content similar to memory.py
content_items = []
content_items.append(
TextContentItem(
text=f"knowledge_search tool found {len(search_results)} chunks:\nBEGIN of knowledge_search tool results.\n"
)
)
for i, result_item in enumerate(search_results):
chunk_text = result_item.content[0].text if result_item.content else ""
metadata_text = f"document_id: {result_item.file_id}, score: {result_item.score}"
if result_item.attributes:
metadata_text += f", attributes: {result_item.attributes}"
text_content = f"[{i + 1}] {metadata_text}\n{chunk_text}\n"
content_items.append(TextContentItem(text=text_content))
content_items.append(TextContentItem(text="END of knowledge_search tool results.\n"))
content_items.append(
TextContentItem(
text=f'The above results were retrieved to help answer the user\'s query: "{query}". Use them as supporting information only in answering this query.\n',
)
)
return ToolInvocationResult(
content=content_items,
metadata={
"document_ids": [r.file_id for r in search_results],
"chunks": [r.content[0].text if r.content else "" for r in search_results],
"scores": [r.score for r in search_results],
},
)
async def _emit_progress_events(
self,
function_name: str,
ctx: ChatCompletionContext,
sequence_number: int,
output_index: int,
item_id: str,
mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP] | None = None,
) -> AsyncIterator[ToolExecutionResult]:
"""Emit progress events for tool execution start."""
# Emit in_progress event based on tool type (only for tools with specific streaming events)
progress_event = None
if mcp_tool_to_server and function_name in mcp_tool_to_server:
sequence_number += 1
progress_event = OpenAIResponseObjectStreamResponseMcpCallInProgress(
item_id=item_id,
output_index=output_index,
sequence_number=sequence_number,
)
elif function_name == "web_search":
sequence_number += 1
progress_event = OpenAIResponseObjectStreamResponseWebSearchCallInProgress(
item_id=item_id,
output_index=output_index,
sequence_number=sequence_number,
)
# Note: knowledge_search and other custom tools don't have specific streaming events in OpenAI spec
if progress_event:
yield ToolExecutionResult(stream_event=progress_event, sequence_number=sequence_number)
# For web search, emit searching event
if function_name == "web_search":
sequence_number += 1
searching_event = OpenAIResponseObjectStreamResponseWebSearchCallSearching(
item_id=item_id,
output_index=output_index,
sequence_number=sequence_number,
)
yield ToolExecutionResult(stream_event=searching_event, sequence_number=sequence_number)
async def _execute_tool(
self,
function_name: str,
tool_kwargs: dict,
ctx: ChatCompletionContext,
mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP] | None = None,
) -> tuple[Exception | None, Any]:
"""Execute the tool and return error exception and result."""
error_exc = None
result = None
try:
if mcp_tool_to_server and function_name in mcp_tool_to_server:
from llama_stack.providers.utils.tools.mcp import invoke_mcp_tool
mcp_tool = mcp_tool_to_server[function_name]
result = await invoke_mcp_tool(
endpoint=mcp_tool.server_url,
headers=mcp_tool.headers or {},
tool_name=function_name,
kwargs=tool_kwargs,
)
elif function_name == "knowledge_search":
response_file_search_tool = next(
(t for t in ctx.response_tools if isinstance(t, OpenAIResponseInputToolFileSearch)),
None,
)
if response_file_search_tool:
# Use vector_stores.search API instead of knowledge_search tool
# to support filters and ranking_options
query = tool_kwargs.get("query", "")
result = await self._execute_knowledge_search_via_vector_store(
query=query,
response_file_search_tool=response_file_search_tool,
)
else:
result = await self.tool_runtime_api.invoke_tool(
tool_name=function_name,
kwargs=tool_kwargs,
)
except Exception as e:
error_exc = e
return error_exc, result
async def _emit_completion_events(
self,
function_name: str,
ctx: ChatCompletionContext,
sequence_number: int,
output_index: int,
item_id: str,
has_error: bool,
mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP] | None = None,
) -> AsyncIterator[ToolExecutionResult]:
"""Emit completion or failure events for tool execution."""
completion_event = None
if mcp_tool_to_server and function_name in mcp_tool_to_server:
sequence_number += 1
if has_error:
completion_event = OpenAIResponseObjectStreamResponseMcpCallFailed(
sequence_number=sequence_number,
)
else:
completion_event = OpenAIResponseObjectStreamResponseMcpCallCompleted(
sequence_number=sequence_number,
)
elif function_name == "web_search":
sequence_number += 1
completion_event = OpenAIResponseObjectStreamResponseWebSearchCallCompleted(
item_id=item_id,
output_index=output_index,
sequence_number=sequence_number,
)
# Note: knowledge_search and other custom tools don't have specific completion events in OpenAI spec
if completion_event:
yield ToolExecutionResult(stream_event=completion_event, sequence_number=sequence_number)
async def _build_result_messages(
self,
function,
tool_call_id: str,
tool_kwargs: dict,
ctx: ChatCompletionContext,
error_exc: Exception | None,
result: Any,
has_error: bool,
mcp_tool_to_server: dict[str, OpenAIResponseInputToolMCP] | None = None,
) -> tuple[Any, Any]:
"""Build output and input messages from tool execution results."""
from llama_stack.providers.utils.inference.prompt_adapter import (
interleaved_content_as_str,
)
# Build output message
if mcp_tool_to_server and function.name in mcp_tool_to_server:
from llama_stack.apis.agents.openai_responses import (
OpenAIResponseOutputMessageMCPCall,
)
message = OpenAIResponseOutputMessageMCPCall(
id=tool_call_id,
arguments=function.arguments,
name=function.name,
server_label=mcp_tool_to_server[function.name].server_label,
)
if error_exc:
message.error = str(error_exc)
elif (result and result.error_code and result.error_code > 0) or (result and result.error_message):
message.error = f"Error (code {result.error_code}): {result.error_message}"
elif result and result.content:
message.output = interleaved_content_as_str(result.content)
else:
if function.name == "web_search":
message = OpenAIResponseOutputMessageWebSearchToolCall(
id=tool_call_id,
status="completed",
)
if has_error:
message.status = "failed"
elif function.name == "knowledge_search":
message = OpenAIResponseOutputMessageFileSearchToolCall(
id=tool_call_id,
queries=[tool_kwargs.get("query", "")],
status="completed",
)
if result and "document_ids" in result.metadata:
message.results = []
for i, doc_id in enumerate(result.metadata["document_ids"]):
text = result.metadata["chunks"][i] if "chunks" in result.metadata else None
score = result.metadata["scores"][i] if "scores" in result.metadata else None
message.results.append(
OpenAIResponseOutputMessageFileSearchToolCallResults(
file_id=doc_id,
filename=doc_id,
text=text,
score=score,
attributes={},
)
)
if has_error:
message.status = "failed"
else:
raise ValueError(f"Unknown tool {function.name} called")
# Build input message
input_message = None
if result and result.content:
if isinstance(result.content, str):
content = result.content
elif isinstance(result.content, list):
content = []
for item in result.content:
if isinstance(item, TextContentItem):
part = OpenAIChatCompletionContentPartTextParam(text=item.text)
elif isinstance(item, ImageContentItem):
if item.image.data:
url = f"data:image;base64,{item.image.data}"
else:
url = item.image.url
part = OpenAIChatCompletionContentPartImageParam(image_url=OpenAIImageURL(url=url))
else:
raise ValueError(f"Unknown result content type: {type(item)}")
content.append(part)
else:
raise ValueError(f"Unknown result content type: {type(result.content)}")
input_message = OpenAIToolMessageParam(content=content, tool_call_id=tool_call_id)
else:
text = str(error_exc) if error_exc else "Tool execution failed"
input_message = OpenAIToolMessageParam(content=text, tool_call_id=tool_call_id)
return message, input_message
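
The contract here mirrors what streaming.py consumes: each ToolExecutionResult yielded by execute_tool_call carries either a stream_event to forward or, in the final item, the output message (appended to the response) and the input message (appended to the next inference turn), and the caller must adopt the returned sequence_number to keep event numbering monotonic.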


@@ -0,0 +1,60 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from dataclasses import dataclass
from openai.types.chat import ChatCompletionToolParam
from pydantic import BaseModel
from llama_stack.apis.agents.openai_responses import (
OpenAIResponseInputTool,
OpenAIResponseObjectStream,
OpenAIResponseOutput,
)
from llama_stack.apis.inference import OpenAIChatCompletionToolCall, OpenAIMessageParam, OpenAIResponseFormatParam
class ToolExecutionResult(BaseModel):
"""Result of streaming tool execution."""
stream_event: OpenAIResponseObjectStream | None = None
sequence_number: int
final_output_message: OpenAIResponseOutput | None = None
final_input_message: OpenAIMessageParam | None = None
@dataclass
class ChatCompletionResult:
"""Result of processing streaming chat completion chunks."""
response_id: str
content: list[str]
tool_calls: dict[int, OpenAIChatCompletionToolCall]
created: int
model: str
finish_reason: str
message_item_id: str # For streaming events
tool_call_item_ids: dict[int, str] # For streaming events
content_part_emitted: bool # Tracking state
@property
def content_text(self) -> str:
"""Get joined content as string."""
return "".join(self.content)
@property
def has_tool_calls(self) -> bool:
"""Check if there are any tool calls."""
return bool(self.tool_calls)
class ChatCompletionContext(BaseModel):
model: str
messages: list[OpenAIMessageParam]
response_tools: list[OpenAIResponseInputTool] | None = None
chat_tools: list[ChatCompletionToolParam] | None = None
temperature: float | None
response_format: OpenAIResponseFormatParam


@@ -0,0 +1,169 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import uuid
from llama_stack.apis.agents.openai_responses import (
OpenAIResponseInput,
OpenAIResponseInputFunctionToolCallOutput,
OpenAIResponseInputMessageContent,
OpenAIResponseInputMessageContentImage,
OpenAIResponseInputMessageContentText,
OpenAIResponseInputTool,
OpenAIResponseMessage,
OpenAIResponseOutputMessageContent,
OpenAIResponseOutputMessageContentOutputText,
OpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseText,
)
from llama_stack.apis.inference import (
OpenAIAssistantMessageParam,
OpenAIChatCompletionContentPartImageParam,
OpenAIChatCompletionContentPartParam,
OpenAIChatCompletionContentPartTextParam,
OpenAIChatCompletionToolCall,
OpenAIChatCompletionToolCallFunction,
OpenAIChoice,
OpenAIDeveloperMessageParam,
OpenAIImageURL,
OpenAIJSONSchema,
OpenAIMessageParam,
OpenAIResponseFormatJSONObject,
OpenAIResponseFormatJSONSchema,
OpenAIResponseFormatParam,
OpenAIResponseFormatText,
OpenAISystemMessageParam,
OpenAIToolMessageParam,
OpenAIUserMessageParam,
)
async def convert_chat_choice_to_response_message(choice: OpenAIChoice) -> OpenAIResponseMessage:
"""Convert an OpenAI Chat Completion choice into an OpenAI Response output message."""
output_content = ""
if isinstance(choice.message.content, str):
output_content = choice.message.content
elif isinstance(choice.message.content, OpenAIChatCompletionContentPartTextParam):
output_content = choice.message.content.text
else:
raise ValueError(
f"Llama Stack OpenAI Responses does not yet support output content type: {type(choice.message.content)}"
)
return OpenAIResponseMessage(
id=f"msg_{uuid.uuid4()}",
content=[OpenAIResponseOutputMessageContentOutputText(text=output_content)],
status="completed",
role="assistant",
)
async def convert_response_content_to_chat_content(
content: (str | list[OpenAIResponseInputMessageContent] | list[OpenAIResponseOutputMessageContent]),
) -> str | list[OpenAIChatCompletionContentPartParam]:
"""
Convert the content parts from an OpenAI Response API request into OpenAI Chat Completion content parts.
The content schemas of each API look similar, but are not exactly the same.
"""
if isinstance(content, str):
return content
converted_parts = []
for content_part in content:
if isinstance(content_part, OpenAIResponseInputMessageContentText):
converted_parts.append(OpenAIChatCompletionContentPartTextParam(text=content_part.text))
elif isinstance(content_part, OpenAIResponseOutputMessageContentOutputText):
converted_parts.append(OpenAIChatCompletionContentPartTextParam(text=content_part.text))
elif isinstance(content_part, OpenAIResponseInputMessageContentImage):
if content_part.image_url:
image_url = OpenAIImageURL(url=content_part.image_url, detail=content_part.detail)
converted_parts.append(OpenAIChatCompletionContentPartImageParam(image_url=image_url))
elif isinstance(content_part, str):
converted_parts.append(OpenAIChatCompletionContentPartTextParam(text=content_part))
else:
raise ValueError(
f"Llama Stack OpenAI Responses does not yet support content type '{type(content_part)}' in this context"
)
return converted_parts
async def convert_response_input_to_chat_messages(
input: str | list[OpenAIResponseInput],
) -> list[OpenAIMessageParam]:
"""
Convert the input from an OpenAI Response API request into OpenAI Chat Completion messages.
"""
messages: list[OpenAIMessageParam] = []
if isinstance(input, list):
for input_item in input:
if isinstance(input_item, OpenAIResponseInputFunctionToolCallOutput):
messages.append(
OpenAIToolMessageParam(
content=input_item.output,
tool_call_id=input_item.call_id,
)
)
elif isinstance(input_item, OpenAIResponseOutputMessageFunctionToolCall):
tool_call = OpenAIChatCompletionToolCall(
index=0,
id=input_item.call_id,
function=OpenAIChatCompletionToolCallFunction(
name=input_item.name,
arguments=input_item.arguments,
),
)
messages.append(OpenAIAssistantMessageParam(tool_calls=[tool_call]))
else:
content = await convert_response_content_to_chat_content(input_item.content)
message_type = await get_message_type_by_role(input_item.role)
if message_type is None:
raise ValueError(
f"Llama Stack OpenAI Responses does not yet support message role '{input_item.role}' in this context"
)
messages.append(message_type(content=content))
else:
messages.append(OpenAIUserMessageParam(content=input))
return messages
async def convert_response_text_to_chat_response_format(
text: OpenAIResponseText,
) -> OpenAIResponseFormatParam:
"""
Convert an OpenAI Response text parameter into an OpenAI Chat Completion response format.
"""
if not text.format or text.format["type"] == "text":
return OpenAIResponseFormatText(type="text")
if text.format["type"] == "json_object":
return OpenAIResponseFormatJSONObject()
if text.format["type"] == "json_schema":
return OpenAIResponseFormatJSONSchema(
json_schema=OpenAIJSONSchema(name=text.format["name"], schema=text.format["schema"])
)
raise ValueError(f"Unsupported text format: {text.format}")
async def get_message_type_by_role(role: str):
role_to_type = {
"user": OpenAIUserMessageParam,
"system": OpenAISystemMessageParam,
"assistant": OpenAIAssistantMessageParam,
"developer": OpenAIDeveloperMessageParam,
}
return role_to_type.get(role)
def is_function_tool_call(
tool_call: OpenAIChatCompletionToolCall,
tools: list[OpenAIResponseInputTool],
) -> bool:
if not tool_call.function:
return False
for t in tools:
if t.type == "function" and t.name == tool_call.function.name:
return True
return False


@@ -31,15 +31,15 @@ from openai.types.chat import (
 from openai.types.chat import (
     ChatCompletionContentPartTextParam as OpenAIChatCompletionContentPartTextParam,
 )
+from openai.types.chat import (
+    ChatCompletionMessageFunctionToolCall as OpenAIChatCompletionMessageFunctionToolCall,
+)
 from openai.types.chat import (
     ChatCompletionMessageParam as OpenAIChatCompletionMessage,
 )
 from openai.types.chat import (
     ChatCompletionMessageToolCall,
 )
-from openai.types.chat import (
-    ChatCompletionMessageToolCallParam as OpenAIChatCompletionMessageToolCall,
-)
 from openai.types.chat import (
     ChatCompletionSystemMessageParam as OpenAIChatCompletionSystemMessage,
 )
@@ -633,7 +633,7 @@ async def convert_message_to_openai_dict_new(
         )
     elif isinstance(message, CompletionMessage):
         tool_calls = [
-            OpenAIChatCompletionMessageToolCall(
+            OpenAIChatCompletionMessageFunctionToolCall(
                 id=tool.call_id,
                 function=OpenAIFunction(
                     name=(tool.tool_name if not isinstance(tool.tool_name, BuiltinTool) else tool.tool_name.value),
@@ -903,7 +903,7 @@ def _convert_openai_request_response_format(

 def _convert_openai_tool_calls(
-    tool_calls: list[OpenAIChatCompletionMessageToolCall],
+    tool_calls: list[OpenAIChatCompletionMessageFunctionToolCall],
 ) -> list[ToolCall]:
     """
     Convert an OpenAI ChatCompletionMessageToolCall list into a list of ToolCall.

llama_stack/ui/.nvmrc (new file)

@@ -0,0 +1 @@
22.5.1


@@ -1,3 +1,12 @@
 # Ignore artifacts:
 build
 coverage
+.next
+node_modules
+dist
+*.lock
+*.log
+
+# Generated files
+*.min.js
+*.min.css


@@ -1 +1,10 @@
-{}
+{
+  "semi": true,
+  "trailingComma": "es5",
+  "singleQuote": false,
+  "printWidth": 80,
+  "tabWidth": 2,
+  "useTabs": false,
+  "bracketSpacing": true,
+  "arrowParens": "avoid"
+}
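These settings explain the formatting-only churn in the TypeScript diffs below: trailingComma: "es5" drops trailing commas after function arguments (so `{ status: 500 },` becomes `{ status: 500 }`), and arrowParens: "avoid" rewrites `(error) =>` as `error =>`.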


@@ -47,7 +47,7 @@ async function proxyRequest(request: NextRequest, method: string) {
     const responseText = await response.text();

     console.log(
-      `Response from FastAPI: ${response.status} ${response.statusText}`,
+      `Response from FastAPI: ${response.status} ${response.statusText}`
     );

     // Create response with same status and headers
@@ -74,7 +74,7 @@ async function proxyRequest(request: NextRequest, method: string) {
         backend_url: BACKEND_URL,
         timestamp: new Date().toISOString(),
       },
-      { status: 500 },
+      { status: 500 }
     );
   }
 }

View file

@ -51,9 +51,9 @@ export default function SignInPage() {
onClick={() => { onClick={() => {
console.log("Signing in with GitHub..."); console.log("Signing in with GitHub...");
signIn("github", { callbackUrl: "/auth/signin" }).catch( signIn("github", { callbackUrl: "/auth/signin" }).catch(
(error) => { error => {
console.error("Sign in error:", error); console.error("Sign in error:", error);
}, }
); );
}} }}
className="w-full" className="w-full"

View file

@ -29,14 +29,13 @@ export default function ChatPlaygroundPage() {
const isModelsLoading = modelsLoading ?? true; const isModelsLoading = modelsLoading ?? true;
useEffect(() => { useEffect(() => {
const fetchModels = async () => { const fetchModels = async () => {
try { try {
setModelsLoading(true); setModelsLoading(true);
setModelsError(null); setModelsError(null);
const modelList = await client.models.list(); const modelList = await client.models.list();
const llmModels = modelList.filter(model => model.model_type === 'llm'); const llmModels = modelList.filter(model => model.model_type === "llm");
setModels(llmModels); setModels(llmModels);
if (llmModels.length > 0) { if (llmModels.length > 0) {
setSelectedModel(llmModels[0].identifier); setSelectedModel(llmModels[0].identifier);
@ -53,19 +52,35 @@ export default function ChatPlaygroundPage() {
}, [client]); }, [client]);
   const extractTextContent = (content: unknown): string => {
-    if (typeof content === 'string') {
+    if (typeof content === "string") {
       return content;
     }
     if (Array.isArray(content)) {
       return content
-        .filter(item => item && typeof item === 'object' && 'type' in item && item.type === 'text')
-        .map(item => (item && typeof item === 'object' && 'text' in item) ? String(item.text) : '')
-        .join('');
+        .filter(
+          item =>
+            item &&
+            typeof item === "object" &&
+            "type" in item &&
+            item.type === "text"
+        )
+        .map(item =>
+          item && typeof item === "object" && "text" in item
+            ? String(item.text)
+            : ""
+        )
+        .join("");
     }
-    if (content && typeof content === 'object' && 'type' in content && content.type === 'text' && 'text' in content) {
-      return String(content.text) || '';
+    if (
+      content &&
+      typeof content === "object" &&
+      "type" in content &&
+      content.type === "text" &&
+      "text" in content
+    ) {
+      return String(content.text) || "";
     }
-    return '';
+    return "";
   };
const handleInputChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => { const handleInputChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
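The rewritten extractTextContent leans on narrowing `unknown` with `typeof` and `in` checks instead of casting through `any`, a pattern this commit applies across the UI. A standalone sketch of the same guard (the helper name is hypothetical):

// Hypothetical type guard mirroring the narrowing used in extractTextContent.
function isTextPart(value: unknown): value is { type: "text"; text: string } {
  return (
    typeof value === "object" &&
    value !== null &&
    "type" in value &&
    (value as { type: unknown }).type === "text" &&
    "text" in value
  );
}

console.log(isTextPart({ type: "text", text: "hi" })); // true
console.log(isTextPart("plain string")); // false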
@ -98,7 +113,10 @@ const handleSubmitWithContent = async (content: string) => {
   try {
     const messageParams: CompletionCreateParams["messages"] = [
       ...messages.map(msg => {
-        const msgContent = typeof msg.content === 'string' ? msg.content : extractTextContent(msg.content);
+        const msgContent =
+          typeof msg.content === "string"
+            ? msg.content
+            : extractTextContent(msg.content);
         if (msg.role === "user") {
           return { role: "user" as const, content: msgContent };
         } else if (msg.role === "assistant") {
@ -107,7 +125,7 @@ const handleSubmitWithContent = async (content: string) => {
return { role: "system" as const, content: msgContent }; return { role: "system" as const, content: msgContent };
} }
}), }),
{ role: "user" as const, content } { role: "user" as const, content },
]; ];
const response = await client.chat.completions.create({ const response = await client.chat.completions.create({
@ -163,7 +181,7 @@ const handleSubmitWithContent = async (content: string) => {
content: message.content, content: message.content,
createdAt: new Date(), createdAt: new Date(),
}; };
setMessages(prev => [...prev, newMessage]) setMessages(prev => [...prev, newMessage]);
handleSubmitWithContent(newMessage.content); handleSubmitWithContent(newMessage.content);
}; };
@ -177,12 +195,20 @@ const handleSubmitWithContent = async (content: string) => {
<div className="mb-4 flex justify-between items-center"> <div className="mb-4 flex justify-between items-center">
<h1 className="text-2xl font-bold">Chat Playground (Completions)</h1> <h1 className="text-2xl font-bold">Chat Playground (Completions)</h1>
<div className="flex gap-2"> <div className="flex gap-2">
<Select value={selectedModel} onValueChange={setSelectedModel} disabled={isModelsLoading || isGenerating}> <Select
value={selectedModel}
onValueChange={setSelectedModel}
disabled={isModelsLoading || isGenerating}
>
<SelectTrigger className="w-[180px]"> <SelectTrigger className="w-[180px]">
<SelectValue placeholder={isModelsLoading ? "Loading models..." : "Select Model"} /> <SelectValue
placeholder={
isModelsLoading ? "Loading models..." : "Select Model"
}
/>
</SelectTrigger> </SelectTrigger>
<SelectContent> <SelectContent>
{models.map((model) => ( {models.map(model => (
<SelectItem key={model.identifier} value={model.identifier}> <SelectItem key={model.identifier} value={model.identifier}>
{model.identifier} {model.identifier}
</SelectItem> </SelectItem>

View file

@ -33,12 +33,12 @@ export default function ChatCompletionDetailPage() {
} catch (err) { } catch (err) {
console.error( console.error(
`Error fetching chat completion detail for ID ${id}:`, `Error fetching chat completion detail for ID ${id}:`,
err, err
); );
setError( setError(
err instanceof Error err instanceof Error
? err ? err
: new Error("Failed to fetch completion detail"), : new Error("Failed to fetch completion detail")
); );
} finally { } finally {
setIsLoading(false); setIsLoading(false);

View file

@ -13,10 +13,10 @@ export default function ResponseDetailPage() {
const client = useAuthClient(); const client = useAuthClient();
const [responseDetail, setResponseDetail] = useState<OpenAIResponse | null>( const [responseDetail, setResponseDetail] = useState<OpenAIResponse | null>(
null, null
); );
const [inputItems, setInputItems] = useState<InputItemListResponse | null>( const [inputItems, setInputItems] = useState<InputItemListResponse | null>(
null, null
); );
const [isLoading, setIsLoading] = useState<boolean>(true); const [isLoading, setIsLoading] = useState<boolean>(true);
const [isLoadingInputItems, setIsLoadingInputItems] = useState<boolean>(true); const [isLoadingInputItems, setIsLoadingInputItems] = useState<boolean>(true);
@ -25,7 +25,7 @@ export default function ResponseDetailPage() {
// Helper function to convert ResponseObject to OpenAIResponse // Helper function to convert ResponseObject to OpenAIResponse
const convertResponseObject = ( const convertResponseObject = (
responseData: ResponseObject, responseData: ResponseObject
): OpenAIResponse => { ): OpenAIResponse => {
return { return {
id: responseData.id, id: responseData.id,
@ -73,12 +73,12 @@ export default function ResponseDetailPage() {
} else { } else {
console.error( console.error(
`Error fetching response detail for ID ${id}:`, `Error fetching response detail for ID ${id}:`,
responseResult.reason, responseResult.reason
); );
setError( setError(
responseResult.reason instanceof Error responseResult.reason instanceof Error
? responseResult.reason ? responseResult.reason
: new Error("Failed to fetch response detail"), : new Error("Failed to fetch response detail")
); );
} }
@ -90,18 +90,18 @@ export default function ResponseDetailPage() {
} else { } else {
console.error( console.error(
`Error fetching input items for response ID ${id}:`, `Error fetching input items for response ID ${id}:`,
inputItemsResult.reason, inputItemsResult.reason
); );
setInputItemsError( setInputItemsError(
inputItemsResult.reason instanceof Error inputItemsResult.reason instanceof Error
? inputItemsResult.reason ? inputItemsResult.reason
: new Error("Failed to fetch input items"), : new Error("Failed to fetch input items")
); );
} }
} catch (err) { } catch (err) {
console.error(`Unexpected error fetching data for ID ${id}:`, err); console.error(`Unexpected error fetching data for ID ${id}:`, err);
setError( setError(
err instanceof Error ? err : new Error("Unexpected error occurred"), err instanceof Error ? err : new Error("Unexpected error occurred")
); );
} finally { } finally {
setIsLoading(false); setIsLoading(false);

View file

@ -18,7 +18,10 @@ import {
PropertiesCard, PropertiesCard,
PropertyItem, PropertyItem,
} from "@/components/layout/detail-layout"; } from "@/components/layout/detail-layout";
-import { PageBreadcrumb, BreadcrumbSegment } from "@/components/layout/page-breadcrumb";
+import {
+  PageBreadcrumb,
+  BreadcrumbSegment,
+} from "@/components/layout/page-breadcrumb";
export default function ContentDetailPage() { export default function ContentDetailPage() {
const params = useParams(); const params = useParams();
@ -28,13 +31,13 @@ export default function ContentDetailPage() {
const contentId = params.contentId as string; const contentId = params.contentId as string;
const client = useAuthClient(); const client = useAuthClient();
const getTextFromContent = (content: any): string => { const getTextFromContent = (content: unknown): string => {
if (typeof content === 'string') { if (typeof content === "string") {
return content; return content;
} else if (content && content.type === 'text') { } else if (content && content.type === "text") {
return content.text; return content.text;
} }
return ''; return "";
}; };
const [store, setStore] = useState<VectorStore | null>(null); const [store, setStore] = useState<VectorStore | null>(null);
@ -44,7 +47,9 @@ export default function ContentDetailPage() {
const [error, setError] = useState<Error | null>(null); const [error, setError] = useState<Error | null>(null);
const [isEditing, setIsEditing] = useState(false); const [isEditing, setIsEditing] = useState(false);
const [editedContent, setEditedContent] = useState(""); const [editedContent, setEditedContent] = useState("");
const [editedMetadata, setEditedMetadata] = useState<Record<string, any>>({}); const [editedMetadata, setEditedMetadata] = useState<Record<string, unknown>>(
{}
);
const [isEditingEmbedding, setIsEditingEmbedding] = useState(false); const [isEditingEmbedding, setIsEditingEmbedding] = useState(false);
const [editedEmbedding, setEditedEmbedding] = useState<number[]>([]); const [editedEmbedding, setEditedEmbedding] = useState<number[]>([]);
@ -64,8 +69,13 @@ export default function ContentDetailPage() {
setFile(fileResponse as VectorStoreFile); setFile(fileResponse as VectorStoreFile);
const contentsAPI = new ContentsAPI(client); const contentsAPI = new ContentsAPI(client);
-        const contentsResponse = await contentsAPI.listContents(vectorStoreId, fileId);
-        const targetContent = contentsResponse.data.find(c => c.id === contentId);
+        const contentsResponse = await contentsAPI.listContents(
+          vectorStoreId,
+          fileId
+        );
+        const targetContent = contentsResponse.data.find(
+          c => c.id === contentId
+        );
if (targetContent) { if (targetContent) {
setContent(targetContent); setContent(targetContent);
@ -76,7 +86,9 @@ export default function ContentDetailPage() {
throw new Error(`Content ${contentId} not found`); throw new Error(`Content ${contentId} not found`);
} }
} catch (err) { } catch (err) {
setError(err instanceof Error ? err : new Error("Failed to load content.")); setError(
err instanceof Error ? err : new Error("Failed to load content.")
);
} finally { } finally {
setIsLoading(false); setIsLoading(false);
} }
@ -88,7 +100,8 @@ export default function ContentDetailPage() {
if (!content) return; if (!content) return;
try { try {
const updates: { content?: string; metadata?: Record<string, any> } = {}; const updates: { content?: string; metadata?: Record<string, unknown> } =
{};
if (editedContent !== getTextFromContent(content.content)) { if (editedContent !== getTextFromContent(content.content)) {
updates.content = editedContent; updates.content = editedContent;
@ -100,25 +113,32 @@ export default function ContentDetailPage() {
if (Object.keys(updates).length > 0) { if (Object.keys(updates).length > 0) {
const contentsAPI = new ContentsAPI(client); const contentsAPI = new ContentsAPI(client);
-        const updatedContent = await contentsAPI.updateContent(vectorStoreId, fileId, contentId, updates);
+        const updatedContent = await contentsAPI.updateContent(
+          vectorStoreId,
+          fileId,
+          contentId,
+          updates
+        );
setContent(updatedContent); setContent(updatedContent);
} }
setIsEditing(false); setIsEditing(false);
} catch (err) { } catch (err) {
console.error('Failed to update content:', err); console.error("Failed to update content:", err);
} }
}; };
const handleDelete = async () => { const handleDelete = async () => {
if (!confirm('Are you sure you want to delete this content?')) return; if (!confirm("Are you sure you want to delete this content?")) return;
try { try {
const contentsAPI = new ContentsAPI(client); const contentsAPI = new ContentsAPI(client);
await contentsAPI.deleteContent(vectorStoreId, fileId, contentId); await contentsAPI.deleteContent(vectorStoreId, fileId, contentId);
router.push(`/logs/vector-stores/${vectorStoreId}/files/${fileId}/contents`); router.push(
`/logs/vector-stores/${vectorStoreId}/files/${fileId}/contents`
);
} catch (err) { } catch (err) {
console.error('Failed to delete content:', err); console.error("Failed to delete content:", err);
} }
}; };
@ -134,10 +154,19 @@ export default function ContentDetailPage() {
   const breadcrumbSegments: BreadcrumbSegment[] = [
     { label: "Vector Stores", href: "/logs/vector-stores" },
-    { label: store?.name || vectorStoreId, href: `/logs/vector-stores/${vectorStoreId}` },
+    {
+      label: store?.name || vectorStoreId,
+      href: `/logs/vector-stores/${vectorStoreId}`,
+    },
     { label: "Files", href: `/logs/vector-stores/${vectorStoreId}` },
-    { label: fileId, href: `/logs/vector-stores/${vectorStoreId}/files/${fileId}` },
-    { label: "Contents", href: `/logs/vector-stores/${vectorStoreId}/files/${fileId}/contents` },
+    {
+      label: fileId,
+      href: `/logs/vector-stores/${vectorStoreId}/files/${fileId}`,
+    },
+    {
+      label: "Contents",
+      href: `/logs/vector-stores/${vectorStoreId}/files/${fileId}/contents`,
+    },
     { label: contentId },
   ];
@ -186,7 +215,7 @@ export default function ContentDetailPage() {
{isEditing ? ( {isEditing ? (
<textarea <textarea
value={editedContent} value={editedContent}
onChange={(e) => setEditedContent(e.target.value)} onChange={e => setEditedContent(e.target.value)}
className="w-full h-64 p-3 border rounded-md resize-none font-mono text-sm" className="w-full h-64 p-3 border rounded-md resize-none font-mono text-sm"
placeholder="Enter content..." placeholder="Enter content..."
/> />
@ -206,16 +235,23 @@ export default function ContentDetailPage() {
<div className="flex gap-2"> <div className="flex gap-2">
{isEditingEmbedding ? ( {isEditingEmbedding ? (
<> <>
<Button size="sm" onClick={() => { <Button
size="sm"
onClick={() => {
setIsEditingEmbedding(false); setIsEditingEmbedding(false);
}}> }}
>
<Save className="h-4 w-4 mr-1" /> <Save className="h-4 w-4 mr-1" />
Save Save
</Button> </Button>
<Button size="sm" variant="outline" onClick={() => { <Button
size="sm"
variant="outline"
onClick={() => {
setEditedEmbedding(content?.embedding || []); setEditedEmbedding(content?.embedding || []);
setIsEditingEmbedding(false); setIsEditingEmbedding(false);
}}> }}
>
<X className="h-4 w-4 mr-1" /> <X className="h-4 w-4 mr-1" />
Cancel Cancel
</Button> </Button>
@ -237,14 +273,16 @@ export default function ContentDetailPage() {
</p> </p>
             <textarea
               value={JSON.stringify(editedEmbedding, null, 2)}
-              onChange={(e) => {
+              onChange={e => {
                 try {
                   const parsed = JSON.parse(e.target.value);
-                  if (Array.isArray(parsed) && parsed.every(v => typeof v === 'number')) {
+                  if (
+                    Array.isArray(parsed) &&
+                    parsed.every(v => typeof v === "number")
+                  ) {
                     setEditedEmbedding(parsed);
                   }
-                } catch {
-                }
+                } catch {}
               }}
className="w-full h-32 p-3 border rounded-md resize-none font-mono text-xs" className="w-full h-32 p-3 border rounded-md resize-none font-mono text-xs"
placeholder="Enter embedding as JSON array..." placeholder="Enter embedding as JSON array..."
@ -259,8 +297,15 @@ export default function ContentDetailPage() {
</div> </div>
<div className="p-3 bg-gray-50 dark:bg-gray-800 rounded-md max-h-32 overflow-y-auto"> <div className="p-3 bg-gray-50 dark:bg-gray-800 rounded-md max-h-32 overflow-y-auto">
<pre className="whitespace-pre-wrap font-mono text-xs text-gray-900 dark:text-gray-100"> <pre className="whitespace-pre-wrap font-mono text-xs text-gray-900 dark:text-gray-100">
-                  [{content.embedding.slice(0, 20).map(v => v.toFixed(6)).join(', ')}
-                  {content.embedding.length > 20 ? `\n... and ${content.embedding.length - 20} more values` : ''}]
+                  [
+                  {content.embedding
+                    .slice(0, 20)
+                    .map(v => v.toFixed(6))
+                    .join(", ")}
+                  {content.embedding.length > 20
+                    ? `\n... and ${content.embedding.length - 20} more values`
+                    : ""}
+                  ]
</pre> </pre>
</div> </div>
</div> </div>
@ -284,7 +329,7 @@ export default function ContentDetailPage() {
<div key={key} className="flex gap-2"> <div key={key} className="flex gap-2">
<Input <Input
value={key} value={key}
onChange={(e) => { onChange={e => {
const newMetadata = { ...editedMetadata }; const newMetadata = { ...editedMetadata };
delete newMetadata[key]; delete newMetadata[key];
newMetadata[e.target.value] = value; newMetadata[e.target.value] = value;
@ -294,11 +339,13 @@ export default function ContentDetailPage() {
className="flex-1" className="flex-1"
/> />
                   <Input
-                    value={typeof value === 'string' ? value : JSON.stringify(value)}
-                    onChange={(e) => {
+                    value={
+                      typeof value === "string" ? value : JSON.stringify(value)
+                    }
+                    onChange={e => {
                       setEditedMetadata({
                         ...editedMetadata,
-                        [key]: e.target.value
+                        [key]: e.target.value,
                       });
}} }}
placeholder="Value" placeholder="Value"
@ -312,7 +359,7 @@ export default function ContentDetailPage() {
onClick={() => { onClick={() => {
setEditedMetadata({ setEditedMetadata({
...editedMetadata, ...editedMetadata,
['']: '' [""]: "",
}); });
}} }}
> >
@ -325,7 +372,7 @@ export default function ContentDetailPage() {
<div key={key} className="flex justify-between py-1"> <div key={key} className="flex justify-between py-1">
<span className="font-medium text-gray-600">{key}:</span> <span className="font-medium text-gray-600">{key}:</span>
<span className="font-mono text-sm"> <span className="font-mono text-sm">
{typeof value === 'string' ? value : JSON.stringify(value)} {typeof value === "string" ? value : JSON.stringify(value)}
</span> </span>
</div> </div>
))} ))}
@ -351,15 +398,15 @@ export default function ContentDetailPage() {
value={`${getTextFromContent(content.content).length} chars`} value={`${getTextFromContent(content.content).length} chars`}
/> />
           {content.metadata.chunk_window && (
-            <PropertyItem
-              label="Position"
-              value={content.metadata.chunk_window}
-            />
+            <PropertyItem label="Position" value={content.metadata.chunk_window} />
           )}
           {file && (
             <>
               <PropertyItem label="File Status" value={file.status} />
-              <PropertyItem label="File Usage" value={`${file.usage_bytes} bytes`} />
+              <PropertyItem
+                label="File Usage"
+                value={`${file.usage_bytes} bytes`}
+              />
</> </>
)} )}
{store && ( {store && (

View file

@ -18,7 +18,10 @@ import {
PropertiesCard, PropertiesCard,
PropertyItem, PropertyItem,
} from "@/components/layout/detail-layout"; } from "@/components/layout/detail-layout";
-import { PageBreadcrumb, BreadcrumbSegment } from "@/components/layout/page-breadcrumb";
+import {
+  PageBreadcrumb,
+  BreadcrumbSegment,
+} from "@/components/layout/page-breadcrumb";
import { import {
Table, Table,
TableBody, TableBody,
@ -36,23 +39,21 @@ export default function ContentsListPage() {
const fileId = params.fileId as string; const fileId = params.fileId as string;
const client = useAuthClient(); const client = useAuthClient();
const getTextFromContent = (content: any): string => { const getTextFromContent = (content: unknown): string => {
if (typeof content === 'string') { if (typeof content === "string") {
return content; return content;
} else if (content && content.type === 'text') { } else if (content && content.type === "text") {
return content.text; return content.text;
} }
return ''; return "";
}; };
const [store, setStore] = useState<VectorStore | null>(null); const [store, setStore] = useState<VectorStore | null>(null);
const [file, setFile] = useState<VectorStoreFile | null>(null); const [file, setFile] = useState<VectorStoreFile | null>(null);
const [contents, setContents] = useState<VectorStoreContentItem[]>([]); const [contents, setContents] = useState<VectorStoreContentItem[]>([]);
const [isLoadingStore, setIsLoadingStore] = useState(true); const [isLoadingStore, setIsLoadingStore] = useState(true);
const [isLoadingFile, setIsLoadingFile] = useState(true);
const [isLoadingContents, setIsLoadingContents] = useState(true); const [isLoadingContents, setIsLoadingContents] = useState(true);
const [errorStore, setErrorStore] = useState<Error | null>(null); const [errorStore, setErrorStore] = useState<Error | null>(null);
const [errorFile, setErrorFile] = useState<Error | null>(null);
const [errorContents, setErrorContents] = useState<Error | null>(null); const [errorContents, setErrorContents] = useState<Error | null>(null);
useEffect(() => { useEffect(() => {
@ -65,7 +66,9 @@ export default function ContentsListPage() {
const response = await client.vectorStores.retrieve(vectorStoreId); const response = await client.vectorStores.retrieve(vectorStoreId);
setStore(response as VectorStore); setStore(response as VectorStore);
} catch (err) { } catch (err) {
setErrorStore(err instanceof Error ? err : new Error("Failed to load vector store.")); setErrorStore(
err instanceof Error ? err : new Error("Failed to load vector store.")
);
} finally { } finally {
setIsLoadingStore(false); setIsLoadingStore(false);
} }
@ -80,10 +83,15 @@ export default function ContentsListPage() {
setIsLoadingFile(true); setIsLoadingFile(true);
setErrorFile(null); setErrorFile(null);
try { try {
const response = await client.vectorStores.files.retrieve(vectorStoreId, fileId); const response = await client.vectorStores.files.retrieve(
vectorStoreId,
fileId
);
setFile(response as VectorStoreFile); setFile(response as VectorStoreFile);
} catch (err) { } catch (err) {
setErrorFile(err instanceof Error ? err : new Error("Failed to load file.")); setErrorFile(
err instanceof Error ? err : new Error("Failed to load file.")
);
} finally { } finally {
setIsLoadingFile(false); setIsLoadingFile(false);
} }
@ -99,10 +107,16 @@ export default function ContentsListPage() {
setErrorContents(null); setErrorContents(null);
try { try {
const contentsAPI = new ContentsAPI(client); const contentsAPI = new ContentsAPI(client);
const contentsResponse = await contentsAPI.listContents(vectorStoreId, fileId, { limit: 100 }); const contentsResponse = await contentsAPI.listContents(
vectorStoreId,
fileId,
{ limit: 100 }
);
setContents(contentsResponse.data); setContents(contentsResponse.data);
} catch (err) { } catch (err) {
setErrorContents(err instanceof Error ? err : new Error("Failed to load contents.")); setErrorContents(
err instanceof Error ? err : new Error("Failed to load contents.")
);
} finally { } finally {
setIsLoadingContents(false); setIsLoadingContents(false);
} }
@ -116,26 +130,36 @@ export default function ContentsListPage() {
await contentsAPI.deleteContent(vectorStoreId, fileId, contentId); await contentsAPI.deleteContent(vectorStoreId, fileId, contentId);
setContents(contents.filter(content => content.id !== contentId)); setContents(contents.filter(content => content.id !== contentId));
} catch (err) { } catch (err) {
console.error('Failed to delete content:', err); console.error("Failed to delete content:", err);
} }
}; };
const handleViewContent = (contentId: string) => { const handleViewContent = (contentId: string) => {
router.push(`/logs/vector-stores/${vectorStoreId}/files/${fileId}/contents/${contentId}`); router.push(
`/logs/vector-stores/${vectorStoreId}/files/${fileId}/contents/${contentId}`
);
}; };
const title = `Contents in File: ${fileId}`; const title = `Contents in File: ${fileId}`;
   const breadcrumbSegments: BreadcrumbSegment[] = [
     { label: "Vector Stores", href: "/logs/vector-stores" },
-    { label: store?.name || vectorStoreId, href: `/logs/vector-stores/${vectorStoreId}` },
+    {
+      label: store?.name || vectorStoreId,
+      href: `/logs/vector-stores/${vectorStoreId}`,
+    },
     { label: "Files", href: `/logs/vector-stores/${vectorStoreId}` },
-    { label: fileId, href: `/logs/vector-stores/${vectorStoreId}/files/${fileId}` },
+    {
+      label: fileId,
+      href: `/logs/vector-stores/${vectorStoreId}/files/${fileId}`,
+    },
     { label: "Contents" },
   ];
   if (errorStore) {
-    return <DetailErrorView title={title} id={vectorStoreId} error={errorStore} />;
+    return (
+      <DetailErrorView title={title} id={vectorStoreId} error={errorStore} />
+    );
} }
if (isLoadingStore) { if (isLoadingStore) {
return <DetailLoadingView title={title} />; return <DetailLoadingView title={title} />;
@ -175,7 +199,7 @@ export default function ContentsListPage() {
</TableRow> </TableRow>
</TableHeader> </TableHeader>
<TableBody> <TableBody>
{contents.map((content) => ( {contents.map(content => (
<TableRow key={content.id}> <TableRow key={content.id}>
<TableCell className="font-mono text-xs"> <TableCell className="font-mono text-xs">
<Button <Button
@ -189,7 +213,10 @@ export default function ContentsListPage() {
</TableCell> </TableCell>
<TableCell> <TableCell>
<div className="max-w-md"> <div className="max-w-md">
<p className="text-sm truncate" title={getTextFromContent(content.content)}> <p
className="text-sm truncate"
title={getTextFromContent(content.content)}
>
{getTextFromContent(content.content)} {getTextFromContent(content.content)}
</p> </p>
</div> </div>
@ -197,12 +224,25 @@ export default function ContentsListPage() {
<TableCell className="text-xs text-gray-500"> <TableCell className="text-xs text-gray-500">
{content.embedding && content.embedding.length > 0 ? ( {content.embedding && content.embedding.length > 0 ? (
<div className="max-w-xs"> <div className="max-w-xs">
<span className="font-mono text-xs bg-gray-100 dark:bg-gray-800 rounded px-1 py-0.5" title={`${content.embedding.length}D vector: [${content.embedding.slice(0, 3).map(v => v.toFixed(3)).join(', ')}...]`}> <span
[{content.embedding.slice(0, 3).map(v => v.toFixed(3)).join(', ')}...] ({content.embedding.length}D) className="font-mono text-xs bg-gray-100 dark:bg-gray-800 rounded px-1 py-0.5"
title={`${content.embedding.length}D vector: [${content.embedding
.slice(0, 3)
.map(v => v.toFixed(3))
.join(", ")}...]`}
>
[
{content.embedding
.slice(0, 3)
.map(v => v.toFixed(3))
.join(", ")}
...] ({content.embedding.length}D)
</span> </span>
</div> </div>
) : ( ) : (
<span className="text-gray-400 dark:text-gray-500 italic">No embedding</span> <span className="text-gray-400 dark:text-gray-500 italic">
No embedding
</span>
)} )}
</TableCell> </TableCell>
<TableCell className="text-xs text-gray-500"> <TableCell className="text-xs text-gray-500">
@ -211,7 +251,9 @@ export default function ContentsListPage() {
: `${content.metadata.content_length || 0} chars`} : `${content.metadata.content_length || 0} chars`}
</TableCell> </TableCell>
<TableCell className="text-xs"> <TableCell className="text-xs">
{new Date(content.created_timestamp * 1000).toLocaleString()} {new Date(
content.created_timestamp * 1000
).toLocaleString()}
</TableCell> </TableCell>
<TableCell> <TableCell>
<div className="flex gap-1"> <div className="flex gap-1">

View file

@ -4,9 +4,12 @@ import { useEffect, useState } from "react";
import { useParams, useRouter } from "next/navigation"; import { useParams, useRouter } from "next/navigation";
import { useAuthClient } from "@/hooks/use-auth-client"; import { useAuthClient } from "@/hooks/use-auth-client";
import type { VectorStore } from "llama-stack-client/resources/vector-stores/vector-stores"; import type { VectorStore } from "llama-stack-client/resources/vector-stores/vector-stores";
-import type { VectorStoreFile, FileContentResponse } from "llama-stack-client/resources/vector-stores/files";
+import type {
+  VectorStoreFile,
+  FileContentResponse,
+} from "llama-stack-client/resources/vector-stores/files";
 import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
-import { Skeleton } from '@/components/ui/skeleton';
+import { Skeleton } from "@/components/ui/skeleton";
import { Button } from "@/components/ui/button"; import { Button } from "@/components/ui/button";
import { List } from "lucide-react"; import { List } from "lucide-react";
import { import {
@ -17,7 +20,10 @@ import {
PropertiesCard, PropertiesCard,
PropertyItem, PropertyItem,
} from "@/components/layout/detail-layout"; } from "@/components/layout/detail-layout";
import { PageBreadcrumb, BreadcrumbSegment } from "@/components/layout/page-breadcrumb"; import {
PageBreadcrumb,
BreadcrumbSegment,
} from "@/components/layout/page-breadcrumb";
export default function FileDetailPage() { export default function FileDetailPage() {
const params = useParams(); const params = useParams();
@ -46,7 +52,9 @@ export default function FileDetailPage() {
const response = await client.vectorStores.retrieve(vectorStoreId); const response = await client.vectorStores.retrieve(vectorStoreId);
setStore(response as VectorStore); setStore(response as VectorStore);
} catch (err) { } catch (err) {
setErrorStore(err instanceof Error ? err : new Error("Failed to load vector store.")); setErrorStore(
err instanceof Error ? err : new Error("Failed to load vector store.")
);
} finally { } finally {
setIsLoadingStore(false); setIsLoadingStore(false);
} }
@ -61,10 +69,15 @@ export default function FileDetailPage() {
setIsLoadingFile(true); setIsLoadingFile(true);
setErrorFile(null); setErrorFile(null);
try { try {
const response = await client.vectorStores.files.retrieve(vectorStoreId, fileId); const response = await client.vectorStores.files.retrieve(
vectorStoreId,
fileId
);
setFile(response as VectorStoreFile); setFile(response as VectorStoreFile);
} catch (err) { } catch (err) {
setErrorFile(err instanceof Error ? err : new Error("Failed to load file.")); setErrorFile(
err instanceof Error ? err : new Error("Failed to load file.")
);
} finally { } finally {
setIsLoadingFile(false); setIsLoadingFile(false);
} }
@ -79,10 +92,15 @@ export default function FileDetailPage() {
setIsLoadingContents(true); setIsLoadingContents(true);
setErrorContents(null); setErrorContents(null);
try { try {
const response = await client.vectorStores.files.content(vectorStoreId, fileId); const response = await client.vectorStores.files.content(
vectorStoreId,
fileId
);
setContents(response); setContents(response);
} catch (err) { } catch (err) {
setErrorContents(err instanceof Error ? err : new Error("Failed to load contents.")); setErrorContents(
err instanceof Error ? err : new Error("Failed to load contents.")
);
} finally { } finally {
setIsLoadingContents(false); setIsLoadingContents(false);
} }
@ -91,20 +109,27 @@ export default function FileDetailPage() {
}, [vectorStoreId, fileId, client]); }, [vectorStoreId, fileId, client]);
const handleViewContents = () => { const handleViewContents = () => {
router.push(`/logs/vector-stores/${vectorStoreId}/files/${fileId}/contents`); router.push(
`/logs/vector-stores/${vectorStoreId}/files/${fileId}/contents`
);
}; };
const title = `File: ${fileId}`; const title = `File: ${fileId}`;
const breadcrumbSegments: BreadcrumbSegment[] = [ const breadcrumbSegments: BreadcrumbSegment[] = [
{ label: "Vector Stores", href: "/logs/vector-stores" }, { label: "Vector Stores", href: "/logs/vector-stores" },
{ label: store?.name || vectorStoreId, href: `/logs/vector-stores/${vectorStoreId}` }, {
label: store?.name || vectorStoreId,
href: `/logs/vector-stores/${vectorStoreId}`,
},
{ label: "Files", href: `/logs/vector-stores/${vectorStoreId}` }, { label: "Files", href: `/logs/vector-stores/${vectorStoreId}` },
{ label: fileId }, { label: fileId },
]; ];
if (errorStore) { if (errorStore) {
return <DetailErrorView title={title} id={vectorStoreId} error={errorStore} />; return (
<DetailErrorView title={title} id={vectorStoreId} error={errorStore} />
);
} }
if (isLoadingStore) { if (isLoadingStore) {
return <DetailLoadingView title={title} />; return <DetailLoadingView title={title} />;
@ -136,19 +161,29 @@ export default function FileDetailPage() {
<h3 className="text-lg font-medium mb-2">File Details</h3> <h3 className="text-lg font-medium mb-2">File Details</h3>
<div className="grid grid-cols-2 gap-4 text-sm"> <div className="grid grid-cols-2 gap-4 text-sm">
<div> <div>
<span className="font-medium text-gray-600 dark:text-gray-400">Status:</span> <span className="font-medium text-gray-600 dark:text-gray-400">
Status:
</span>
<span className="ml-2">{file.status}</span> <span className="ml-2">{file.status}</span>
</div> </div>
<div> <div>
<span className="font-medium text-gray-600 dark:text-gray-400">Size:</span> <span className="font-medium text-gray-600 dark:text-gray-400">
Size:
</span>
<span className="ml-2">{file.usage_bytes} bytes</span> <span className="ml-2">{file.usage_bytes} bytes</span>
</div> </div>
<div> <div>
<span className="font-medium text-gray-600 dark:text-gray-400">Created:</span> <span className="font-medium text-gray-600 dark:text-gray-400">
<span className="ml-2">{new Date(file.created_at * 1000).toLocaleString()}</span> Created:
</span>
<span className="ml-2">
{new Date(file.created_at * 1000).toLocaleString()}
</span>
</div> </div>
<div> <div>
<span className="font-medium text-gray-600 dark:text-gray-400">Content Strategy:</span> <span className="font-medium text-gray-600 dark:text-gray-400">
Content Strategy:
</span>
<span className="ml-2">{file.chunking_strategy.type}</span> <span className="ml-2">{file.chunking_strategy.type}</span>
</div> </div>
</div> </div>
@ -166,9 +201,7 @@ export default function FileDetailPage() {
</div> </div>
</div> </div>
) : ( ) : (
<p className="text-gray-500 italic text-sm"> <p className="text-gray-500 italic text-sm">File not found.</p>
File not found.
</p>
)} )}
</CardContent> </CardContent>
</Card> </Card>
@ -192,16 +225,27 @@ export default function FileDetailPage() {
<div className="space-y-3"> <div className="space-y-3">
<div className="grid grid-cols-2 gap-4 text-sm"> <div className="grid grid-cols-2 gap-4 text-sm">
<div> <div>
<span className="font-medium text-gray-600 dark:text-gray-400">Content Items:</span> <span className="font-medium text-gray-600 dark:text-gray-400">
Content Items:
</span>
<span className="ml-2">{contents.content.length}</span> <span className="ml-2">{contents.content.length}</span>
</div> </div>
<div> <div>
<span className="font-medium text-gray-600 dark:text-gray-400">Total Characters:</span> <span className="font-medium text-gray-600 dark:text-gray-400">
<span className="ml-2">{contents.content.reduce((total, item) => total + item.text.length, 0)}</span> Total Characters:
</span>
<span className="ml-2">
{contents.content.reduce(
(total, item) => total + item.text.length,
0
)}
</span>
</div> </div>
</div> </div>
<div className="pt-2"> <div className="pt-2">
<span className="text-sm font-medium text-gray-600 dark:text-gray-400">Preview:</span> <span className="text-sm font-medium text-gray-600 dark:text-gray-400">
Preview:
</span>
<div className="mt-1 bg-gray-50 dark:bg-gray-800 rounded-md p-3"> <div className="mt-1 bg-gray-50 dark:bg-gray-800 rounded-md p-3">
<p className="text-sm text-gray-900 dark:text-gray-100 line-clamp-3"> <p className="text-sm text-gray-900 dark:text-gray-100 line-clamp-3">
{contents.content[0]?.text.substring(0, 200)}... {contents.content[0]?.text.substring(0, 200)}...

View file

@ -1,7 +1,7 @@
"use client"; "use client";
import { useEffect, useState } from "react"; import { useEffect, useState } from "react";
import { useParams, useRouter } from "next/navigation"; import { useParams } from "next/navigation";
import { useAuthClient } from "@/hooks/use-auth-client"; import { useAuthClient } from "@/hooks/use-auth-client";
import type { VectorStore } from "llama-stack-client/resources/vector-stores/vector-stores"; import type { VectorStore } from "llama-stack-client/resources/vector-stores/vector-stores";
import type { VectorStoreFile } from "llama-stack-client/resources/vector-stores/files"; import type { VectorStoreFile } from "llama-stack-client/resources/vector-stores/files";
@ -11,7 +11,6 @@ export default function VectorStoreDetailPage() {
const params = useParams(); const params = useParams();
const id = params.id as string; const id = params.id as string;
const client = useAuthClient(); const client = useAuthClient();
const router = useRouter();
const [store, setStore] = useState<VectorStore | null>(null); const [store, setStore] = useState<VectorStore | null>(null);
const [files, setFiles] = useState<VectorStoreFile[]>([]); const [files, setFiles] = useState<VectorStoreFile[]>([]);
@ -34,9 +33,7 @@ export default function VectorStoreDetailPage() {
setStore(response as VectorStore); setStore(response as VectorStore);
} catch (err) { } catch (err) {
setErrorStore( setErrorStore(
err instanceof Error err instanceof Error ? err : new Error("Failed to load vector store.")
? err
: new Error("Failed to load vector store."),
); );
} finally { } finally {
setIsLoadingStore(false); setIsLoadingStore(false);
@ -55,18 +52,18 @@ export default function VectorStoreDetailPage() {
setIsLoadingFiles(true); setIsLoadingFiles(true);
setErrorFiles(null); setErrorFiles(null);
try { try {
const result = await client.vectorStores.files.list(id as any); const result = await client.vectorStores.files.list(id);
setFiles((result as any).data); setFiles((result as { data: VectorStoreFile[] }).data);
} catch (err) { } catch (err) {
setErrorFiles( setErrorFiles(
err instanceof Error ? err : new Error("Failed to load files."), err instanceof Error ? err : new Error("Failed to load files.")
); );
} finally { } finally {
setIsLoadingFiles(false); setIsLoadingFiles(false);
} }
}; };
fetchFiles(); fetchFiles();
}, [id]); }, [id, client.vectorStores.files]);
return ( return (
<VectorStoreDetailView <VectorStoreDetailView

View file

@ -1,7 +1,6 @@
"use client"; "use client";
import React from "react"; import React from "react";
import { useAuthClient } from "@/hooks/use-auth-client";
import type { import type {
ListVectorStoresResponse, ListVectorStoresResponse,
VectorStore, VectorStore,
@ -12,7 +11,6 @@ import { Button } from "@/components/ui/button";
import { import {
Table, Table,
TableBody, TableBody,
TableCaption,
TableCell, TableCell,
TableHead, TableHead,
TableHeader, TableHeader,
@ -21,7 +19,6 @@ import {
import { Skeleton } from "@/components/ui/skeleton"; import { Skeleton } from "@/components/ui/skeleton";
export default function VectorStoresPage() { export default function VectorStoresPage() {
const client = useAuthClient();
const router = useRouter(); const router = useRouter();
const { const {
data: stores, data: stores,
@ -37,7 +34,7 @@ export default function VectorStoresPage() {
after: params.after, after: params.after,
limit: params.limit, limit: params.limit,
order: params.order, order: params.order,
} as any); } as Parameters<typeof client.vectorStores.list>[0]);
return response as ListVectorStoresResponse; return response as ListVectorStoresResponse;
}, },
errorMessagePrefix: "vector stores", errorMessagePrefix: "vector stores",
@ -88,7 +85,7 @@ export default function VectorStoresPage() {
</TableRow> </TableRow>
</TableHeader> </TableHeader>
<TableBody> <TableBody>
{stores.map((store) => { {stores.map(store => {
const fileCounts = store.file_counts; const fileCounts = store.file_counts;
const metadata = store.metadata || {}; const metadata = store.metadata || {};
const providerId = metadata.provider_id ?? ""; const providerId = metadata.provider_id ?? "";

View file

@ -14,7 +14,7 @@ describe("ChatCompletionDetailView", () => {
isLoading={true} isLoading={true}
error={null} error={null}
id="test-id" id="test-id"
/>, />
); );
// Use the data-slot attribute for Skeletons // Use the data-slot attribute for Skeletons
const skeletons = container.querySelectorAll('[data-slot="skeleton"]'); const skeletons = container.querySelectorAll('[data-slot="skeleton"]');
@ -28,10 +28,10 @@ describe("ChatCompletionDetailView", () => {
isLoading={false} isLoading={false}
error={{ name: "Error", message: "Network Error" }} error={{ name: "Error", message: "Network Error" }}
id="err-id" id="err-id"
/>, />
); );
expect( expect(
screen.getByText(/Error loading details for ID err-id: Network Error/), screen.getByText(/Error loading details for ID err-id: Network Error/)
).toBeInTheDocument(); ).toBeInTheDocument();
}); });
@ -42,11 +42,11 @@ describe("ChatCompletionDetailView", () => {
isLoading={false} isLoading={false}
error={{ name: "Error", message: "" }} error={{ name: "Error", message: "" }}
id="err-id" id="err-id"
/>, />
); );
// Use regex to match the error message regardless of whitespace // Use regex to match the error message regardless of whitespace
expect( expect(
screen.getByText(/Error loading details for ID\s*err-id\s*:/), screen.getByText(/Error loading details for ID\s*err-id\s*:/)
).toBeInTheDocument(); ).toBeInTheDocument();
}); });
@ -57,11 +57,11 @@ describe("ChatCompletionDetailView", () => {
isLoading={false} isLoading={false}
error={{} as Error} error={{} as Error}
id="err-id" id="err-id"
/>, />
); );
// Use regex to match the error message regardless of whitespace // Use regex to match the error message regardless of whitespace
expect( expect(
screen.getByText(/Error loading details for ID\s*err-id\s*:/), screen.getByText(/Error loading details for ID\s*err-id\s*:/)
).toBeInTheDocument(); ).toBeInTheDocument();
}); });
@ -72,10 +72,10 @@ describe("ChatCompletionDetailView", () => {
isLoading={false} isLoading={false}
error={null} error={null}
id="notfound-id" id="notfound-id"
/>, />
); );
expect( expect(
screen.getByText("No details found for ID: notfound-id."), screen.getByText("No details found for ID: notfound-id.")
).toBeInTheDocument(); ).toBeInTheDocument();
}); });
@ -100,7 +100,7 @@ describe("ChatCompletionDetailView", () => {
isLoading={false} isLoading={false}
error={null} error={null}
id={mockCompletion.id} id={mockCompletion.id}
/>, />
); );
// Input // Input
expect(screen.getByText("Input")).toBeInTheDocument(); expect(screen.getByText("Input")).toBeInTheDocument();
@ -112,7 +112,7 @@ describe("ChatCompletionDetailView", () => {
expect(screen.getByText("Properties")).toBeInTheDocument(); expect(screen.getByText("Properties")).toBeInTheDocument();
expect(screen.getByText("Created:")).toBeInTheDocument(); expect(screen.getByText("Created:")).toBeInTheDocument();
expect( expect(
screen.getByText(new Date(1710000000 * 1000).toLocaleString()), screen.getByText(new Date(1710000000 * 1000).toLocaleString())
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText("ID:")).toBeInTheDocument(); expect(screen.getByText("ID:")).toBeInTheDocument();
expect(screen.getByText("comp_123")).toBeInTheDocument(); expect(screen.getByText("comp_123")).toBeInTheDocument();
@ -150,7 +150,7 @@ describe("ChatCompletionDetailView", () => {
isLoading={false} isLoading={false}
error={null} error={null}
id={mockCompletion.id} id={mockCompletion.id}
/>, />
); );
// Output should include the tool call block (should be present twice: input and output) // Output should include the tool call block (should be present twice: input and output)
const toolCallLabels = screen.getAllByText("Tool Call"); const toolCallLabels = screen.getAllByText("Tool Call");
@ -178,13 +178,13 @@ describe("ChatCompletionDetailView", () => {
isLoading={false} isLoading={false}
error={null} error={null}
id={mockCompletion.id} id={mockCompletion.id}
/>, />
); );
// Input section should be present but empty // Input section should be present but empty
expect(screen.getByText("Input")).toBeInTheDocument(); expect(screen.getByText("Input")).toBeInTheDocument();
// Output section should show fallback message // Output section should show fallback message
expect( expect(
screen.getByText("No message found in assistant's choice."), screen.getByText("No message found in assistant's choice.")
).toBeInTheDocument(); ).toBeInTheDocument();
// Properties should show N/A for finish reason // Properties should show N/A for finish reason
expect(screen.getByText("Finish Reason:")).toBeInTheDocument(); expect(screen.getByText("Finish Reason:")).toBeInTheDocument();

View file

@ -53,14 +53,14 @@ export function ChatCompletionDetailView({
{completion.choices?.[0]?.message?.tool_calls && {completion.choices?.[0]?.message?.tool_calls &&
Array.isArray(completion.choices[0].message.tool_calls) && Array.isArray(completion.choices[0].message.tool_calls) &&
!completion.input_messages?.some( !completion.input_messages?.some(
(im) => im =>
im.role === "assistant" && im.role === "assistant" &&
im.tool_calls && im.tool_calls &&
Array.isArray(im.tool_calls) && Array.isArray(im.tool_calls) &&
im.tool_calls.length > 0, im.tool_calls.length > 0
) )
? completion.choices[0].message.tool_calls.map( ? completion.choices[0].message.tool_calls.map(
(toolCall: any, index: number) => { (toolCall: { function?: { name?: string } }, index: number) => {
const assistantToolCallMessage: ChatMessage = { const assistantToolCallMessage: ChatMessage = {
role: "assistant", role: "assistant",
tool_calls: [toolCall], tool_calls: [toolCall],
@ -72,7 +72,7 @@ export function ChatCompletionDetailView({
message={assistantToolCallMessage} message={assistantToolCallMessage}
/> />
); );
}, }
) )
: null} : null}
</CardContent> </CardContent>
@ -89,7 +89,7 @@ export function ChatCompletionDetailView({
/> />
) : ( ) : (
<p className="text-gray-500 italic text-sm"> <p className="text-gray-500 italic text-sm">
No message found in assistant's choice. No message found in assistant&apos;s choice.
</p> </p>
)} )}
</CardContent> </CardContent>
@ -120,13 +120,18 @@ export function ChatCompletionDetailView({
value={ value={
<div> <div>
<ul className="list-disc list-inside pl-4 mt-1"> <ul className="list-disc list-inside pl-4 mt-1">
-                  {toolCalls.map((toolCall: any, index: number) => (
+                  {toolCalls.map(
+                    (
+                      toolCall: { function?: { name?: string } },
+                      index: number
+                    ) => (
                       <li key={index}>
                         <span className="text-gray-900 font-medium">
                           {toolCall.function?.name || "N/A"}
                         </span>
                       </li>
-                  ))}
+                    )
+                  )}
</ul> </ul>
</div> </div>
} }

View file

@ -83,7 +83,7 @@ describe("ChatCompletionsTable", () => {
// Default pass-through implementations // Default pass-through implementations
truncateText.mockImplementation((text: string | undefined) => text); truncateText.mockImplementation((text: string | undefined) => text);
extractTextFromContentPart.mockImplementation((content: unknown) => extractTextFromContentPart.mockImplementation((content: unknown) =>
typeof content === "string" ? content : "extracted text", typeof content === "string" ? content : "extracted text"
); );
extractDisplayableText.mockImplementation((message: unknown) => { extractDisplayableText.mockImplementation((message: unknown) => {
const msg = message as { content?: string }; const msg = message as { content?: string };
@ -138,7 +138,7 @@ describe("ChatCompletionsTable", () => {
if (row) { if (row) {
fireEvent.click(row); fireEvent.click(row);
expect(mockPush).toHaveBeenCalledWith( expect(mockPush).toHaveBeenCalledWith(
"/logs/chat-completions/completion_123", "/logs/chat-completions/completion_123"
); );
} else { } else {
throw new Error('Row with "Test prompt" not found for router mock test.'); throw new Error('Row with "Test prompt" not found for router mock test.');
@ -162,7 +162,7 @@ describe("ChatCompletionsTable", () => {
expect(tableCaption).toBeInTheDocument(); expect(tableCaption).toBeInTheDocument();
if (tableCaption) { if (tableCaption) {
const captionSkeleton = tableCaption.querySelector( const captionSkeleton = tableCaption.querySelector(
'[data-slot="skeleton"]', '[data-slot="skeleton"]'
); );
expect(captionSkeleton).toBeInTheDocument(); expect(captionSkeleton).toBeInTheDocument();
} }
@ -172,7 +172,7 @@ describe("ChatCompletionsTable", () => {
expect(tableBody).toBeInTheDocument(); expect(tableBody).toBeInTheDocument();
if (tableBody) { if (tableBody) {
const bodySkeletons = tableBody.querySelectorAll( const bodySkeletons = tableBody.querySelectorAll(
'[data-slot="skeleton"]', '[data-slot="skeleton"]'
); );
expect(bodySkeletons.length).toBeGreaterThan(0); expect(bodySkeletons.length).toBeGreaterThan(0);
} }
@ -192,14 +192,14 @@ describe("ChatCompletionsTable", () => {
render(<ChatCompletionsTable {...defaultProps} />); render(<ChatCompletionsTable {...defaultProps} />);
expect( expect(
screen.getByText("Unable to load chat completions"), screen.getByText("Unable to load chat completions")
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText(errorMessage)).toBeInTheDocument(); expect(screen.getByText(errorMessage)).toBeInTheDocument();
}); });
test.each([{ name: "Error", message: "" }, {}])( test.each([{ name: "Error", message: "" }, {}])(
"renders default error message when error has no message", "renders default error message when error has no message",
(errorObject) => { errorObject => {
mockedUsePagination.mockReturnValue({ mockedUsePagination.mockReturnValue({
data: [], data: [],
status: "error", status: "error",
@ -210,14 +210,14 @@ describe("ChatCompletionsTable", () => {
render(<ChatCompletionsTable {...defaultProps} />); render(<ChatCompletionsTable {...defaultProps} />);
expect( expect(
screen.getByText("Unable to load chat completions"), screen.getByText("Unable to load chat completions")
).toBeInTheDocument(); ).toBeInTheDocument();
expect( expect(
screen.getByText( screen.getByText(
"An unexpected error occurred while loading the data.", "An unexpected error occurred while loading the data."
), )
).toBeInTheDocument(); ).toBeInTheDocument();
}, }
); );
}); });
@ -225,7 +225,7 @@ describe("ChatCompletionsTable", () => {
test('renders "No chat completions found." and no table when data array is empty', () => { test('renders "No chat completions found." and no table when data array is empty', () => {
render(<ChatCompletionsTable {...defaultProps} />); render(<ChatCompletionsTable {...defaultProps} />);
expect( expect(
screen.getByText("No chat completions found."), screen.getByText("No chat completions found.")
).toBeInTheDocument(); ).toBeInTheDocument();
// Ensure that the table structure is NOT rendered in the empty state // Ensure that the table structure is NOT rendered in the empty state
@ -292,7 +292,7 @@ describe("ChatCompletionsTable", () => {
// Table caption // Table caption
expect( expect(
screen.getByText("A list of your recent chat completions."), screen.getByText("A list of your recent chat completions.")
).toBeInTheDocument(); ).toBeInTheDocument();
// Table headers // Table headers
@ -306,14 +306,14 @@ describe("ChatCompletionsTable", () => {
expect(screen.getByText("Test output")).toBeInTheDocument(); expect(screen.getByText("Test output")).toBeInTheDocument();
expect(screen.getByText("llama-test-model")).toBeInTheDocument(); expect(screen.getByText("llama-test-model")).toBeInTheDocument();
expect( expect(
screen.getByText(new Date(1710000000 * 1000).toLocaleString()), screen.getByText(new Date(1710000000 * 1000).toLocaleString())
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText("Another input")).toBeInTheDocument(); expect(screen.getByText("Another input")).toBeInTheDocument();
expect(screen.getByText("Another output")).toBeInTheDocument(); expect(screen.getByText("Another output")).toBeInTheDocument();
expect(screen.getByText("llama-another-model")).toBeInTheDocument(); expect(screen.getByText("llama-another-model")).toBeInTheDocument();
expect( expect(
screen.getByText(new Date(1710001000 * 1000).toLocaleString()), screen.getByText(new Date(1710001000 * 1000).toLocaleString())
).toBeInTheDocument(); ).toBeInTheDocument();
}); });
}); });
@ -328,7 +328,7 @@ describe("ChatCompletionsTable", () => {
return typeof text === "string" && text.length > effectiveMaxLength return typeof text === "string" && text.length > effectiveMaxLength
? text.slice(0, effectiveMaxLength) + "..." ? text.slice(0, effectiveMaxLength) + "..."
: text; : text;
}, }
); );
const longInput = const longInput =
@ -368,7 +368,7 @@ describe("ChatCompletionsTable", () => {
// The truncated text should be present for both input and output // The truncated text should be present for both input and output
const truncatedTexts = screen.getAllByText( const truncatedTexts = screen.getAllByText(
longInput.slice(0, 10) + "...", longInput.slice(0, 10) + "..."
); );
expect(truncatedTexts.length).toBe(2); // one for input, one for output expect(truncatedTexts.length).toBe(2); // one for input, one for output
}); });
@ -420,7 +420,7 @@ describe("ChatCompletionsTable", () => {
// Verify the extracted text appears in the table // Verify the extracted text appears in the table
expect(screen.getByText("Extracted input")).toBeInTheDocument(); expect(screen.getByText("Extracted input")).toBeInTheDocument();
expect( expect(
screen.getByText("Extracted output from assistant"), screen.getByText("Extracted output from assistant")
).toBeInTheDocument(); ).toBeInTheDocument();
}); });
}); });

View file

@ -5,6 +5,7 @@ import {
UsePaginationOptions, UsePaginationOptions,
ListChatCompletionsResponse, ListChatCompletionsResponse,
} from "@/lib/types"; } from "@/lib/types";
import { ListChatCompletionsParams } from "@/lib/llama-stack-client";
import { LogsTable, LogTableRow } from "@/components/logs/logs-table"; import { LogsTable, LogTableRow } from "@/components/logs/logs-table";
import { import {
extractTextFromContentPart, extractTextFromContentPart,
@ -38,14 +39,14 @@ export function ChatCompletionsTable({
limit: number; limit: number;
model?: string; model?: string;
order?: string; order?: string;
}, }
) => { ) => {
const response = await client.chat.completions.list({ const response = await client.chat.completions.list({
after: params.after, after: params.after,
limit: params.limit, limit: params.limit,
...(params.model && { model: params.model }), ...(params.model && { model: params.model }),
...(params.order && { order: params.order }), ...(params.order && { order: params.order }),
} as any); } as ListChatCompletionsParams);
return response as ListChatCompletionsResponse; return response as ListChatCompletionsResponse;
}; };
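Replacing the `as any` cast with the named ListChatCompletionsParams type keeps the pagination arguments compiler-checked. A simplified sketch of why that matters (the interface and function here are hypothetical, not the real client API):

// Hypothetical, simplified stand-in for a typed list-params call.
interface ListParams {
  after?: string;
  limit?: number;
  order?: "asc" | "desc";
}

async function list(params: ListParams): Promise<string[]> {
  // A real client would serialize these into the request; we just echo keys.
  return Object.keys(params);
}

list({ limit: 20, order: "desc" }).then(keys => console.log(keys));
// list({ limmit: 20 }); // a typo now fails to compile instead of hiding behind `as any`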

View file

@ -37,7 +37,11 @@ export function ChatMessageItem({ message }: ChatMessageItemProps) {
) { ) {
return ( return (
<> <>
-        {message.tool_calls.map((toolCall: any, index: number) => {
+        {message.tool_calls.map(
+          (
+            toolCall: { function?: { name?: string; arguments?: unknown } },
+            index: number
+          ) => {
const formattedToolCall = formatToolCallToString(toolCall); const formattedToolCall = formatToolCallToString(toolCall);
const toolCallContent = ( const toolCallContent = (
<ToolCallBlock> <ToolCallBlock>
@ -51,7 +55,8 @@ export function ChatMessageItem({ message }: ChatMessageItemProps) {
content={toolCallContent} content={toolCallContent}
/> />
); );
-        })}
+          }
+        )}
</> </>
); );
} else { } else {

View file

@@ -1,18 +1,18 @@
-"use client"
+"use client";

-import React, { useMemo, useState } from "react"
-import { cva, type VariantProps } from "class-variance-authority"
-import { motion } from "framer-motion"
-import { Ban, ChevronRight, Code2, Loader2, Terminal } from "lucide-react"
-import { cn } from "@/lib/utils"
+import React, { useMemo, useState } from "react";
+import { cva, type VariantProps } from "class-variance-authority";
+import { motion } from "framer-motion";
+import { Ban, ChevronRight, Code2, Loader2, Terminal } from "lucide-react";
+import { cn } from "@/lib/utils";
 import {
   Collapsible,
   CollapsibleContent,
   CollapsibleTrigger,
-} from "@/components/ui/collapsible"
-import { FilePreview } from "@/components/ui/file-preview"
-import { MarkdownRenderer } from "@/components/chat-playground/markdown-renderer"
+} from "@/components/ui/collapsible";
+import { FilePreview } from "@/components/ui/file-preview";
+import { MarkdownRenderer } from "@/components/chat-playground/markdown-renderer";

 const chatBubbleVariants = cva(
   "group/message relative break-words rounded-lg p-3 text-sm sm:max-w-[70%]",
@@ -52,66 +52,66 @@ const chatBubbleVariants = cva(
       },
     ],
   }
-)
+);

-type Animation = VariantProps<typeof chatBubbleVariants>["animation"]
+type Animation = VariantProps<typeof chatBubbleVariants>["animation"];

 interface Attachment {
-  name?: string
-  contentType?: string
-  url: string
+  name?: string;
+  contentType?: string;
+  url: string;
 }

 interface PartialToolCall {
-  state: "partial-call"
-  toolName: string
+  state: "partial-call";
+  toolName: string;
 }

 interface ToolCall {
-  state: "call"
-  toolName: string
+  state: "call";
+  toolName: string;
 }

 interface ToolResult {
-  state: "result"
-  toolName: string
+  state: "result";
+  toolName: string;
   result: {
-    __cancelled?: boolean
-    [key: string]: any
-  }
+    __cancelled?: boolean;
+    [key: string]: unknown;
+  };
 }

-type ToolInvocation = PartialToolCall | ToolCall | ToolResult
+type ToolInvocation = PartialToolCall | ToolCall | ToolResult;

 interface ReasoningPart {
-  type: "reasoning"
-  reasoning: string
+  type: "reasoning";
+  reasoning: string;
 }

 interface ToolInvocationPart {
-  type: "tool-invocation"
-  toolInvocation: ToolInvocation
+  type: "tool-invocation";
+  toolInvocation: ToolInvocation;
 }

 interface TextPart {
-  type: "text"
-  text: string
+  type: "text";
+  text: string;
 }

 // For compatibility with AI SDK types, not used
 interface SourcePart {
-  type: "source"
-  source?: any
+  type: "source";
+  source?: unknown;
 }

 interface FilePart {
-  type: "file"
-  mimeType: string
-  data: string
+  type: "file";
+  mimeType: string;
+  data: string;
 }

 interface StepStartPart {
-  type: "step-start"
+  type: "step-start";
 }

 type MessagePart =
@@ -120,22 +120,22 @@ type MessagePart =
   | ToolInvocationPart
   | SourcePart
   | FilePart
-  | StepStartPart
+  | StepStartPart;

 export interface Message {
-  id: string
-  role: "user" | "assistant" | (string & {})
-  content: string
-  createdAt?: Date
-  experimental_attachments?: Attachment[]
-  toolInvocations?: ToolInvocation[]
-  parts?: MessagePart[]
+  id: string;
+  role: "user" | "assistant" | (string & {});
+  content: string;
+  createdAt?: Date;
+  experimental_attachments?: Attachment[];
+  toolInvocations?: ToolInvocation[];
+  parts?: MessagePart[];
 }

 export interface ChatMessageProps extends Message {
-  showTimeStamp?: boolean
-  animation?: Animation
-  actions?: React.ReactNode
+  showTimeStamp?: boolean;
+  animation?: Animation;
+  actions?: React.ReactNode;
 }

 export const ChatMessage: React.FC<ChatMessageProps> = ({
@@ -150,21 +150,21 @@ export const ChatMessage: React.FC<ChatMessageProps> = ({
   parts,
 }) => {
   const files = useMemo(() => {
-    return experimental_attachments?.map((attachment) => {
-      const dataArray = dataUrlToUint8Array(attachment.url)
+    return experimental_attachments?.map(attachment => {
+      const dataArray = dataUrlToUint8Array(attachment.url);
       const file = new File([dataArray], attachment.name ?? "Unknown", {
         type: attachment.contentType,
-      })
-      return file
-    })
-  }, [experimental_attachments])
+      });
+      return file;
+    });
+  }, [experimental_attachments]);

-  const isUser = role === "user"
+  const isUser = role === "user";

   const formattedTime = createdAt?.toLocaleTimeString("en-US", {
     hour: "2-digit",
     minute: "2-digit",
-  })
+  });

   if (isUser) {
     return (
@@ -174,7 +174,7 @@ export const ChatMessage: React.FC<ChatMessageProps> = ({
         {files ? (
           <div className="mb-1 flex flex-wrap gap-2">
             {files.map((file, index) => {
-              return <FilePreview file={file} key={index} />
+              return <FilePreview file={file} key={index} />;
             })}
           </div>
         ) : null}
@@ -195,7 +195,7 @@ export const ChatMessage: React.FC<ChatMessageProps> = ({
           </time>
         ) : null}
       </div>
-    )
+    );
   }

   if (parts && parts.length > 0) {
@@ -230,23 +230,23 @@ export const ChatMessage: React.FC<ChatMessageProps> = ({
               </time>
             ) : null}
           </div>
-        )
+        );
       } else if (part.type === "reasoning") {
-        return <ReasoningBlock key={`reasoning-${index}`} part={part} />
+        return <ReasoningBlock key={`reasoning-${index}`} part={part} />;
       } else if (part.type === "tool-invocation") {
         return (
           <ToolCall
             key={`tool-${index}`}
             toolInvocations={[part.toolInvocation]}
           />
-        )
+        );
       }
-      return null
-    })
+      return null;
+    });
   }

   if (toolInvocations && toolInvocations.length > 0) {
-    return <ToolCall toolInvocations={toolInvocations} />
+    return <ToolCall toolInvocations={toolInvocations} />;
   }

   return (
@@ -272,17 +272,17 @@ export const ChatMessage: React.FC<ChatMessageProps> = ({
         </time>
       ) : null}
     </div>
-  )
-}
+  );
+};

 function dataUrlToUint8Array(data: string) {
-  const base64 = data.split(",")[1]
-  const buf = Buffer.from(base64, "base64")
-  return new Uint8Array(buf)
+  const base64 = data.split(",")[1];
+  const buf = Buffer.from(base64, "base64");
+  return new Uint8Array(buf);
 }

 const ReasoningBlock = ({ part }: { part: ReasoningPart }) => {
-  const [isOpen, setIsOpen] = useState(false)
+  const [isOpen, setIsOpen] = useState(false);

   return (
     <div className="mb-2 flex flex-col items-start sm:max-w-[70%]">
@@ -319,20 +319,20 @@ const ReasoningBlock = ({ part }: { part: ReasoningPart }) => {
         </CollapsibleContent>
       </Collapsible>
     </div>
-  )
-}
+  );
+};

 function ToolCall({
   toolInvocations,
 }: Pick<ChatMessageProps, "toolInvocations">) {
-  if (!toolInvocations?.length) return null
+  if (!toolInvocations?.length) return null;

   return (
     <div className="flex flex-col items-start gap-2">
       {toolInvocations.map((invocation, index) => {
         const isCancelled =
           invocation.state === "result" &&
-          invocation.result.__cancelled === true
+          invocation.result.__cancelled === true;

         if (isCancelled) {
           return (
@@ -350,7 +350,7 @@ function ToolCall({
                 </span>
               </span>
             </div>
-          )
+          );
         }

         switch (invocation.state) {
@@ -373,7 +373,7 @@ function ToolCall({
                 </span>
                 <Loader2 className="h-3 w-3 animate-spin" />
               </div>
-            )
+            );
           case "result":
             return (
               <div
@@ -395,11 +395,11 @@ function ToolCall({
                   {JSON.stringify(invocation.result, null, 2)}
                 </pre>
               </div>
-            )
+            );
           default:
-            return null
+            return null;
         }
       })}
     </div>
-  )
+  );
 }
View file
@@ -1,4 +1,4 @@
-"use client"
+"use client";

 import {
   forwardRef,
@@ -6,48 +6,48 @@ import {
   useRef,
   useState,
   type ReactElement,
-} from "react"
-import { ArrowDown, ThumbsDown, ThumbsUp } from "lucide-react"
+} from "react";
+import { ArrowDown, ThumbsDown, ThumbsUp } from "lucide-react";

-import { cn } from "@/lib/utils"
-import { useAutoScroll } from "@/hooks/use-auto-scroll"
-import { Button } from "@/components/ui/button"
-import { type Message } from "@/components/chat-playground/chat-message"
-import { CopyButton } from "@/components/ui/copy-button"
-import { MessageInput } from "@/components/chat-playground/message-input"
-import { MessageList } from "@/components/chat-playground/message-list"
-import { PromptSuggestions } from "@/components/chat-playground/prompt-suggestions"
+import { cn } from "@/lib/utils";
+import { useAutoScroll } from "@/hooks/use-auto-scroll";
+import { Button } from "@/components/ui/button";
+import { type Message } from "@/components/chat-playground/chat-message";
+import { CopyButton } from "@/components/ui/copy-button";
+import { MessageInput } from "@/components/chat-playground/message-input";
+import { MessageList } from "@/components/chat-playground/message-list";
+import { PromptSuggestions } from "@/components/chat-playground/prompt-suggestions";

 interface ChatPropsBase {
   handleSubmit: (
     event?: { preventDefault?: () => void },
     options?: { experimental_attachments?: FileList }
-  ) => void
-  messages: Array<Message>
-  input: string
-  className?: string
-  handleInputChange: React.ChangeEventHandler<HTMLTextAreaElement>
-  isGenerating: boolean
-  stop?: () => void
+  ) => void;
+  messages: Array<Message>;
+  input: string;
+  className?: string;
+  handleInputChange: React.ChangeEventHandler<HTMLTextAreaElement>;
+  isGenerating: boolean;
+  stop?: () => void;
   onRateResponse?: (
     messageId: string,
     rating: "thumbs-up" | "thumbs-down"
-  ) => void
-  setMessages?: (messages: any[]) => void
-  transcribeAudio?: (blob: Blob) => Promise<string>
+  ) => void;
+  setMessages?: (messages: Message[]) => void;
+  transcribeAudio?: (blob: Blob) => Promise<string>;
 }

 interface ChatPropsWithoutSuggestions extends ChatPropsBase {
-  append?: never
-  suggestions?: never
+  append?: never;
+  suggestions?: never;
 }

 interface ChatPropsWithSuggestions extends ChatPropsBase {
-  append: (message: { role: "user"; content: string }) => void
-  suggestions: string[]
+  append: (message: { role: "user"; content: string }) => void;
+  suggestions: string[];
 }

-type ChatProps = ChatPropsWithoutSuggestions | ChatPropsWithSuggestions
+type ChatProps = ChatPropsWithoutSuggestions | ChatPropsWithSuggestions;

 export function Chat({
   messages,
@@ -63,34 +63,34 @@ export function Chat({
   setMessages,
   transcribeAudio,
 }: ChatProps) {
-  const lastMessage = messages.at(-1)
-  const isEmpty = messages.length === 0
-  const isTyping = lastMessage?.role === "user"
+  const lastMessage = messages.at(-1);
+  const isEmpty = messages.length === 0;
+  const isTyping = lastMessage?.role === "user";

-  const messagesRef = useRef(messages)
-  messagesRef.current = messages
+  const messagesRef = useRef(messages);
+  messagesRef.current = messages;

   // Enhanced stop function that marks pending tool calls as cancelled
   const handleStop = useCallback(() => {
-    stop?.()
+    stop?.();

-    if (!setMessages) return
+    if (!setMessages) return;

-    const latestMessages = [...messagesRef.current]
+    const latestMessages = [...messagesRef.current];
     const lastAssistantMessage = latestMessages.findLast(
-      (m) => m.role === "assistant"
-    )
+      m => m.role === "assistant"
+    );

-    if (!lastAssistantMessage) return
+    if (!lastAssistantMessage) return;

-    let needsUpdate = false
-    let updatedMessage = { ...lastAssistantMessage }
+    let needsUpdate = false;
+    let updatedMessage = { ...lastAssistantMessage };

     if (lastAssistantMessage.toolInvocations) {
       const updatedToolInvocations = lastAssistantMessage.toolInvocations.map(
-        (toolInvocation) => {
+        toolInvocation => {
           if (toolInvocation.state === "call") {
-            needsUpdate = true
+            needsUpdate = true;
             return {
               ...toolInvocation,
               state: "result",
@@ -98,28 +98,32 @@ export function Chat({
                 content: "Tool execution was cancelled",
                 __cancelled: true, // Special marker to indicate cancellation
               },
-            } as const
+            } as const;
           }
-          return toolInvocation
+          return toolInvocation;
         }
-      )
+      );

       if (needsUpdate) {
         updatedMessage = {
           ...updatedMessage,
           toolInvocations: updatedToolInvocations,
-        }
+        };
       }
     }

     if (lastAssistantMessage.parts && lastAssistantMessage.parts.length > 0) {
-      const updatedParts = lastAssistantMessage.parts.map((part: any) => {
+      const updatedParts = lastAssistantMessage.parts.map(
+        (part: {
+          type: string;
+          toolInvocation?: { state: string; toolName: string };
+        }) => {
           if (
             part.type === "tool-invocation" &&
             part.toolInvocation &&
             part.toolInvocation.state === "call"
           ) {
-            needsUpdate = true
+            needsUpdate = true;
             return {
               ...part,
               toolInvocation: {
@@ -130,29 +134,30 @@ export function Chat({
                 __cancelled: true,
               },
             },
+            };
           }
+          return part;
         }
-        return part
-      })
+      );

       if (needsUpdate) {
         updatedMessage = {
           ...updatedMessage,
           parts: updatedParts,
-        }
+        };
       }
     }

     if (needsUpdate) {
       const messageIndex = latestMessages.findIndex(
-        (m) => m.id === lastAssistantMessage.id
-      )
+        m => m.id === lastAssistantMessage.id
+      );
       if (messageIndex !== -1) {
-        latestMessages[messageIndex] = updatedMessage
-        setMessages(latestMessages)
+        latestMessages[messageIndex] = updatedMessage;
+        setMessages(latestMessages);
       }
     }
-  }, [stop, setMessages, messagesRef])
+  }, [stop, setMessages, messagesRef]);

   const messageOptions = useCallback(
     (message: Message) => ({
@@ -189,7 +194,7 @@ export function Chat({
       ),
     }),
     [onRateResponse]
-  )
+  );

   return (
     <ChatContainer className={className}>
@@ -237,15 +242,15 @@ export function Chat({
         </div>
       </div>
     </ChatContainer>
-  )
+  );
 }

-Chat.displayName = "Chat"
+Chat.displayName = "Chat";

 export function ChatMessages({
   messages,
   children,
 }: React.PropsWithChildren<{
-  messages: Message[]
+  messages: Message[];
 }>) {
   const {
     containerRef,
@@ -253,7 +258,7 @@ export function ChatMessages({
     handleScroll,
     shouldAutoScroll,
     handleTouchStart,
-  } = useAutoScroll([messages])
+  } = useAutoScroll([messages]);

   return (
     <div
@@ -281,7 +286,7 @@ export function ChatMessages({
         </div>
       )}
     </div>
-  )
+  );
 }

 export const ChatContainer = forwardRef<
@@ -294,56 +299,56 @@ export const ChatContainer = forwardRef<
       className={cn("flex flex-col max-h-full w-full", className)}
       {...props}
     />
-  )
-})
+  );
+});

-ChatContainer.displayName = "ChatContainer"
+ChatContainer.displayName = "ChatContainer";

 interface ChatFormProps {
-  className?: string
-  isPending: boolean
+  className?: string;
+  isPending: boolean;
   handleSubmit: (
     event?: { preventDefault?: () => void },
     options?: { experimental_attachments?: FileList }
-  ) => void
+  ) => void;
   children: (props: {
-    files: File[] | null
-    setFiles: React.Dispatch<React.SetStateAction<File[] | null>>
-  }) => ReactElement
+    files: File[] | null;
+    setFiles: React.Dispatch<React.SetStateAction<File[] | null>>;
+  }) => ReactElement;
 }

 export const ChatForm = forwardRef<HTMLFormElement, ChatFormProps>(
   ({ children, handleSubmit, isPending, className }, ref) => {
-    const [files, setFiles] = useState<File[] | null>(null)
+    const [files, setFiles] = useState<File[] | null>(null);

     const onSubmit = (event: React.FormEvent) => {
-      // if (isPending) {
-      //   event.preventDefault()
-      //   return
-      // }
+      if (isPending) {
+        event.preventDefault();
+        return;
+      }

       if (!files) {
-        handleSubmit(event)
-        return
+        handleSubmit(event);
+        return;
       }

-      const fileList = createFileList(files)
-      handleSubmit(event, { experimental_attachments: fileList })
-      setFiles(null)
-    }
+      const fileList = createFileList(files);
+      handleSubmit(event, { experimental_attachments: fileList });
+      setFiles(null);
+    };

     return (
       <form ref={ref} onSubmit={onSubmit} className={className}>
         {children({ files, setFiles })}
       </form>
-    )
+    );
   }
-)
+);

-ChatForm.displayName = "ChatForm"
+ChatForm.displayName = "ChatForm";

 function createFileList(files: File[] | FileList): FileList {
-  const dataTransfer = new DataTransfer()
+  const dataTransfer = new DataTransfer();
   for (const file of Array.from(files)) {
-    dataTransfer.items.add(file)
+    dataTransfer.items.add(file);
   }
-  return dataTransfer.files
+  return dataTransfer.files;
 }
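Note the behavioral change buried in the formatting pass: the previously commented-out `isPending` guard in `ChatForm`'s `onSubmit` is re-enabled, so submits are dropped while a response is still being generated. A self-contained sketch of that guard, with illustrative names, outside React:

```ts
// Minimal sketch of the re-enabled submit guard.
type SubmitEvent = { preventDefault?: () => void };

function makeOnSubmit(isPending: () => boolean, submit: () => void) {
  return (event: SubmitEvent) => {
    if (isPending()) {
      // Swallow the submit instead of firing a second request mid-generation.
      event.preventDefault?.();
      return;
    }
    submit();
  };
}

// Usage: const onSubmit = makeOnSubmit(() => generating, () => send());
// onSubmit({ preventDefault: () => {} });
```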
View file
@@ -1,11 +1,11 @@
-"use client"
+"use client";

-import { AnimatePresence, motion } from "framer-motion"
-import { X } from "lucide-react"
+import { AnimatePresence, motion } from "framer-motion";
+import { X } from "lucide-react";

 interface InterruptPromptProps {
-  isOpen: boolean
-  close: () => void
+  isOpen: boolean;
+  close: () => void;
 }

 export function InterruptPrompt({ isOpen, close }: InterruptPromptProps) {
@@ -37,5 +37,5 @@ export function InterruptPrompt({ isOpen, close }: InterruptPromptProps) {
       </motion.div>
     )}
   </AnimatePresence>
-  )
+  );
 }
View file
@@ -1,12 +1,12 @@
-import React, { Suspense, useEffect, useState } from "react"
-import Markdown from "react-markdown"
-import remarkGfm from "remark-gfm"
+import React, { Suspense, useEffect, useState } from "react";
+import Markdown from "react-markdown";
+import remarkGfm from "remark-gfm";

-import { cn } from "@/lib/utils"
-import { CopyButton } from "@/components/ui/copy-button"
+import { cn } from "@/lib/utils";
+import { CopyButton } from "@/components/ui/copy-button";

 interface MarkdownRendererProps {
-  children: string
+  children: string;
 }

 export function MarkdownRenderer({ children }: MarkdownRendererProps) {
@@ -16,34 +16,34 @@ export function MarkdownRenderer({ children }: MarkdownRendererProps) {
       {children}
     </Markdown>
   </div>
-  )
+  );
 }

 interface HighlightedPre extends React.HTMLAttributes<HTMLPreElement> {
-  children: string
-  language: string
+  children: string;
+  language: string;
 }

 const HighlightedPre = React.memo(
   ({ children, language, ...props }: HighlightedPre) => {
-    const [tokens, setTokens] = useState<any[] | null>(null)
-    const [isSupported, setIsSupported] = useState(false)
+    const [tokens, setTokens] = useState<unknown[] | null>(null);
+    const [isSupported, setIsSupported] = useState(false);

     useEffect(() => {
-      let mounted = true
+      let mounted = true;

       const loadAndHighlight = async () => {
         try {
-          const { codeToTokens, bundledLanguages } = await import("shiki")
+          const { codeToTokens, bundledLanguages } = await import("shiki");

-          if (!mounted) return
+          if (!mounted) return;

           if (!(language in bundledLanguages)) {
-            setIsSupported(false)
-            return
+            setIsSupported(false);
+            return;
           }

-          setIsSupported(true)
+          setIsSupported(true);

           const { tokens: highlightedTokens } = await codeToTokens(children, {
             lang: language as keyof typeof bundledLanguages,
@@ -52,31 +52,31 @@ const HighlightedPre = React.memo(
               light: "github-light",
               dark: "github-dark",
             },
-          })
+          });

           if (mounted) {
-            setTokens(highlightedTokens)
+            setTokens(highlightedTokens);
           }
-        } catch (error) {
+        } catch {
           if (mounted) {
-            setIsSupported(false)
+            setIsSupported(false);
           }
         }
-      }
+      };

-      loadAndHighlight()
+      loadAndHighlight();

       return () => {
-        mounted = false
-      }
-    }, [children, language])
+        mounted = false;
+      };
+    }, [children, language]);

     if (!isSupported) {
-      return <pre {...props}>{children}</pre>
+      return <pre {...props}>{children}</pre>;
     }

     if (!tokens) {
-      return <pre {...props}>{children}</pre>
+      return <pre {...props}>{children}</pre>;
     }

     return (
@@ -89,7 +89,7 @@ const HighlightedPre = React.memo(
                 const style =
                   typeof token.htmlStyle === "string"
                     ? undefined
-                    : token.htmlStyle
+                    : token.htmlStyle;

                 return (
                   <span
@@ -99,7 +99,7 @@ const HighlightedPre = React.memo(
                   >
                     {token.content}
                   </span>
-                )
+                );
               })}
             </span>
             {lineIndex !== tokens.length - 1 && "\n"}
@@ -107,15 +107,15 @@ const HighlightedPre = React.memo(
         ))}
       </code>
     </pre>
-    )
-  }
-)
+    );
+  }
+);

-HighlightedPre.displayName = "HighlightedCode"
+HighlightedPre.displayName = "HighlightedCode";

 interface CodeBlockProps extends React.HTMLAttributes<HTMLPreElement> {
-  children: React.ReactNode
-  className?: string
-  language: string
+  children: React.ReactNode;
+  className?: string;
+  language: string;
 }

 const CodeBlock = ({
@@ -127,12 +127,12 @@ const CodeBlock = ({
   const code =
     typeof children === "string"
       ? children
-      : childrenTakeAllStringContents(children)
+      : childrenTakeAllStringContents(children);

   const preClass = cn(
     "overflow-x-scroll rounded-md border bg-background/50 p-4 font-mono text-sm [scrollbar-width:none]",
     className
-  )
+  );

   return (
     <div className="group/code relative mb-4">
@@ -152,27 +152,27 @@ const CodeBlock = ({
         <CopyButton content={code} copyMessage="Copied code to clipboard" />
       </div>
     </div>
-  )
-}
+  );
+};

-function childrenTakeAllStringContents(element: any): string {
+function childrenTakeAllStringContents(element: unknown): string {
   if (typeof element === "string") {
-    return element
+    return element;
   }

   if (element?.props?.children) {
-    let children = element.props.children
+    const children = element.props.children;

     if (Array.isArray(children)) {
       return children
-        .map((child) => childrenTakeAllStringContents(child))
-        .join("")
+        .map(child => childrenTakeAllStringContents(child))
+        .join("");
     } else {
-      return childrenTakeAllStringContents(children)
+      return childrenTakeAllStringContents(children);
     }
   }

-  return ""
+  return "";
 }

 const COMPONENTS = {
@@ -184,8 +184,14 @@ const COMPONENTS = {
   strong: withClass("strong", "font-semibold"),
   a: withClass("a", "text-primary underline underline-offset-2"),
   blockquote: withClass("blockquote", "border-l-2 border-primary pl-4"),
-  code: ({ children, className, node, ...rest }: any) => {
-    const match = /language-(\w+)/.exec(className || "")
+  code: ({
+    children,
+    className,
+  }: {
+    children: React.ReactNode;
+    className?: string;
+  }) => {
+    const match = /language-(\w+)/.exec(className || "");
     return match ? (
       <CodeBlock className={className} language={match[1]} {...rest}>
         {children}
@@ -199,9 +205,9 @@ const COMPONENTS = {
       >
         {children}
       </code>
-    )
+    );
   },
-  pre: ({ children }: any) => children,
+  pre: ({ children }: { children: React.ReactNode }) => children,
   ol: withClass("ol", "list-decimal space-y-2 pl-6"),
   ul: withClass("ul", "list-disc space-y-2 pl-6"),
   li: withClass("li", "my-1.5"),
@@ -220,14 +226,14 @@ const COMPONENTS = {
   tr: withClass("tr", "m-0 border-t p-0 even:bg-muted"),
   p: withClass("p", "whitespace-pre-wrap"),
   hr: withClass("hr", "border-foreground/20"),
-}
+};

 function withClass(Tag: keyof JSX.IntrinsicElements, classes: string) {
-  const Component = ({ node, ...props }: any) => (
+  const Component = ({ ...props }: Record<string, unknown>) => (
     <Tag className={classes} {...props} />
-  )
-  Component.displayName = Tag
-  return Component
+  );
+  Component.displayName = Tag;
+  return Component;
 }

-export default MarkdownRenderer
+export default MarkdownRenderer;
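The `any` → `unknown` swap in `childrenTakeAllStringContents` forces every property access through an explicit check or cast. A compact sketch of the same traversal with the narrowing spelled out; the `NodeWithChildren` shape is an assumption for the example, not react-markdown's real node type:

```ts
// Illustrative: the minimal shape we probe for, spelled out as a type.
type NodeWithChildren = { props?: { children?: unknown } };

function collectStrings(element: unknown): string {
  if (typeof element === "string") return element;
  // With `unknown`, the cast documents exactly which shape we rely on.
  const children = (element as NodeWithChildren)?.props?.children;
  if (children == null) return "";
  return Array.isArray(children)
    ? children.map(collectStrings).join("")
    : collectStrings(children);
}

// collectStrings({ props: { children: ["a", { props: { children: "b" } }] } })
// => "ab"
```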
View file
@@ -1,41 +1,41 @@
-"use client"
+"use client";

-import React, { useEffect, useRef, useState } from "react"
-import { AnimatePresence, motion } from "framer-motion"
-import { ArrowUp, Info, Loader2, Mic, Paperclip, Square } from "lucide-react"
-import { omit } from "remeda"
+import React, { useEffect, useRef, useState } from "react";
+import { AnimatePresence, motion } from "framer-motion";
+import { ArrowUp, Info, Loader2, Mic, Paperclip, Square } from "lucide-react";
+import { omit } from "remeda";

-import { cn } from "@/lib/utils"
-import { useAudioRecording } from "@/hooks/use-audio-recording"
-import { useAutosizeTextArea } from "@/hooks/use-autosize-textarea"
-import { AudioVisualizer } from "@/components/ui/audio-visualizer"
-import { Button } from "@/components/ui/button"
-import { FilePreview } from "@/components/ui/file-preview"
-import { InterruptPrompt } from "@/components/chat-playground/interrupt-prompt"
+import { cn } from "@/lib/utils";
+import { useAudioRecording } from "@/hooks/use-audio-recording";
+import { useAutosizeTextArea } from "@/hooks/use-autosize-textarea";
+import { AudioVisualizer } from "@/components/ui/audio-visualizer";
+import { Button } from "@/components/ui/button";
+import { FilePreview } from "@/components/ui/file-preview";
+import { InterruptPrompt } from "@/components/chat-playground/interrupt-prompt";

 interface MessageInputBaseProps
   extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {
-  value: string
-  submitOnEnter?: boolean
-  stop?: () => void
-  isGenerating: boolean
-  enableInterrupt?: boolean
-  transcribeAudio?: (blob: Blob) => Promise<string>
+  value: string;
+  submitOnEnter?: boolean;
+  stop?: () => void;
+  isGenerating: boolean;
+  enableInterrupt?: boolean;
+  transcribeAudio?: (blob: Blob) => Promise<string>;
 }

 interface MessageInputWithoutAttachmentProps extends MessageInputBaseProps {
-  allowAttachments?: false
+  allowAttachments?: false;
 }

 interface MessageInputWithAttachmentsProps extends MessageInputBaseProps {
-  allowAttachments: true
-  files: File[] | null
-  setFiles: React.Dispatch<React.SetStateAction<File[] | null>>
+  allowAttachments: true;
+  files: File[] | null;
+  setFiles: React.Dispatch<React.SetStateAction<File[] | null>>;
 }

 type MessageInputProps =
   | MessageInputWithoutAttachmentProps
-  | MessageInputWithAttachmentsProps
+  | MessageInputWithAttachmentsProps;

 export function MessageInput({
   placeholder = "Ask AI...",
@@ -48,8 +48,8 @@ export function MessageInput({
   transcribeAudio,
   ...props
 }: MessageInputProps) {
-  const [isDragging, setIsDragging] = useState(false)
-  const [showInterruptPrompt, setShowInterruptPrompt] = useState(false)
+  const [isDragging, setIsDragging] = useState(false);
+  const [showInterruptPrompt, setShowInterruptPrompt] = useState(false);

   const {
     isListening,
@@ -61,123 +61,124 @@ export function MessageInput({
     stopRecording,
   } = useAudioRecording({
     transcribeAudio,
-    onTranscriptionComplete: (text) => {
-      props.onChange?.({ target: { value: text } } as any)
+    onTranscriptionComplete: text => {
+      props.onChange?.({
+        target: { value: text },
+      } as React.ChangeEvent<HTMLTextAreaElement>);
     },
-  })
+  });

   useEffect(() => {
     if (!isGenerating) {
-      setShowInterruptPrompt(false)
+      setShowInterruptPrompt(false);
     }
-  }, [isGenerating])
+  }, [isGenerating]);

   const addFiles = (files: File[] | null) => {
     if (props.allowAttachments) {
-      props.setFiles((currentFiles) => {
+      props.setFiles(currentFiles => {
         if (currentFiles === null) {
-          return files
+          return files;
         }

         if (files === null) {
-          return currentFiles
+          return currentFiles;
         }

-        return [...currentFiles, ...files]
-      })
+        return [...currentFiles, ...files];
+      });
     }
-  }
+  };

   const onDragOver = (event: React.DragEvent) => {
-    if (props.allowAttachments !== true) return
-    event.preventDefault()
-    setIsDragging(true)
-  }
+    if (props.allowAttachments !== true) return;
+    event.preventDefault();
+    setIsDragging(true);
+  };

   const onDragLeave = (event: React.DragEvent) => {
-    if (props.allowAttachments !== true) return
-    event.preventDefault()
-    setIsDragging(false)
-  }
+    if (props.allowAttachments !== true) return;
+    event.preventDefault();
+    setIsDragging(false);
+  };

   const onDrop = (event: React.DragEvent) => {
-    setIsDragging(false)
-    if (props.allowAttachments !== true) return
-    event.preventDefault()
-    const dataTransfer = event.dataTransfer
+    setIsDragging(false);
+    if (props.allowAttachments !== true) return;
+    event.preventDefault();
+    const dataTransfer = event.dataTransfer;
     if (dataTransfer.files.length) {
-      addFiles(Array.from(dataTransfer.files))
+      addFiles(Array.from(dataTransfer.files));
     }
-  }
+  };

   const onPaste = (event: React.ClipboardEvent) => {
-    const items = event.clipboardData?.items
-    if (!items) return
+    const items = event.clipboardData?.items;
+    if (!items) return;

-    const text = event.clipboardData.getData("text")
+    const text = event.clipboardData.getData("text");
     if (text && text.length > 500 && props.allowAttachments) {
-      event.preventDefault()
-      const blob = new Blob([text], { type: "text/plain" })
+      event.preventDefault();
+      const blob = new Blob([text], { type: "text/plain" });
       const file = new File([blob], "Pasted text", {
         type: "text/plain",
         lastModified: Date.now(),
-      })
-      addFiles([file])
-      return
+      });
+      addFiles([file]);
+      return;
     }

     const files = Array.from(items)
-      .map((item) => item.getAsFile())
-      .filter((file) => file !== null)
+      .map(item => item.getAsFile())
+      .filter(file => file !== null);

     if (props.allowAttachments && files.length > 0) {
-      addFiles(files)
+      addFiles(files);
     }
-  }
+  };

   const onKeyDown = (event: React.KeyboardEvent<HTMLTextAreaElement>) => {
     if (submitOnEnter && event.key === "Enter" && !event.shiftKey) {
-      event.preventDefault()
+      event.preventDefault();

       if (isGenerating && stop && enableInterrupt) {
         if (showInterruptPrompt) {
-          stop()
-          setShowInterruptPrompt(false)
-          event.currentTarget.form?.requestSubmit()
+          stop();
+          setShowInterruptPrompt(false);
+          event.currentTarget.form?.requestSubmit();
         } else if (
           props.value ||
           (props.allowAttachments && props.files?.length)
         ) {
-          setShowInterruptPrompt(true)
-          return
+          setShowInterruptPrompt(true);
+          return;
         }
       }

-      event.currentTarget.form?.requestSubmit()
+      event.currentTarget.form?.requestSubmit();
     }

-    onKeyDownProp?.(event)
-  }
+    onKeyDownProp?.(event);
+  };

-  const textAreaRef = useRef<HTMLTextAreaElement>(null)
-  const [textAreaHeight, setTextAreaHeight] = useState<number>(0)
+  const textAreaRef = useRef<HTMLTextAreaElement>(null);
+  const [textAreaHeight, setTextAreaHeight] = useState<number>(0);

   useEffect(() => {
     if (textAreaRef.current) {
-      setTextAreaHeight(textAreaRef.current.offsetHeight)
+      setTextAreaHeight(textAreaRef.current.offsetHeight);
     }
-  }, [props.value])
+  }, [props.value]);

   const showFileList =
-    props.allowAttachments && props.files && props.files.length > 0
+    props.allowAttachments && props.files && props.files.length > 0;

   useAutosizeTextArea({
     ref: textAreaRef,
     maxHeight: 240,
     borderWidth: 1,
     dependencies: [props.value, showFileList],
-  })
+  });

   return (
     <div
@@ -220,24 +221,24 @@ export function MessageInput({
       <div className="absolute inset-x-3 bottom-0 z-20 overflow-x-scroll py-3">
         <div className="flex space-x-3">
           <AnimatePresence mode="popLayout">
-            {props.files?.map((file) => {
+            {props.files?.map(file => {
               return (
                 <FilePreview
                   key={file.name + String(file.lastModified)}
                   file={file}
                   onRemove={() => {
-                    props.setFiles((files) => {
-                      if (!files) return null
+                    props.setFiles(files => {
+                      if (!files) return null;

                       const filtered = Array.from(files).filter(
-                        (f) => f !== file
-                      )
+                        f => f !== file
+                      );

-                      if (filtered.length === 0) return null
-                      return filtered
-                    })
+                      if (filtered.length === 0) return null;
+                      return filtered;
+                    });
                   }}
                 />
-              )
+              );
             })}
           </AnimatePresence>
         </div>
@@ -256,8 +257,8 @@ export function MessageInput({
             aria-label="Attach a file"
             disabled={true}
             onClick={async () => {
-              const files = await showFileUploadDialog()
-              addFiles(files)
+              const files = await showFileUploadDialog();
+              addFiles(files);
             }}
           >
             <Paperclip className="h-4 w-4" />
@@ -308,12 +309,12 @@ export function MessageInput({
         onStopRecording={stopRecording}
       />
     </div>
-  )
+  );
 }

-MessageInput.displayName = "MessageInput"
+MessageInput.displayName = "MessageInput";

 interface FileUploadOverlayProps {
-  isDragging: boolean
+  isDragging: boolean;
 }

 function FileUploadOverlay({ isDragging }: FileUploadOverlayProps) {
@@ -333,29 +334,29 @@ function FileUploadOverlay({ isDragging }: FileUploadOverlayProps) {
       </motion.div>
     )}
   </AnimatePresence>
-  )
+  );
 }

 function showFileUploadDialog() {
-  const input = document.createElement("input")
+  const input = document.createElement("input");

-  input.type = "file"
-  input.multiple = true
-  input.accept = "*/*"
-  input.click()
+  input.type = "file";
+  input.multiple = true;
+  input.accept = "*/*";
+  input.click();

-  return new Promise<File[] | null>((resolve) => {
-    input.onchange = (e) => {
-      const files = (e.currentTarget as HTMLInputElement).files
+  return new Promise<File[] | null>(resolve => {
+    input.onchange = e => {
+      const files = (e.currentTarget as HTMLInputElement).files;

       if (files) {
-        resolve(Array.from(files))
-        return
+        resolve(Array.from(files));
+        return;
       }

-      resolve(null)
-    }
-  })
+      resolve(null);
+    };
+  });
 }

 function TranscribingOverlay() {
@@ -385,12 +386,12 @@ function TranscribingOverlay() {
       Transcribing audio...
     </p>
   </motion.div>
-  )
+  );
 }

 interface RecordingPromptProps {
-  isVisible: boolean
-  onStopRecording: () => void
+  isVisible: boolean;
+  onStopRecording: () => void;
 }

 function RecordingPrompt({ isVisible, onStopRecording }: RecordingPromptProps) {
@@ -418,15 +419,15 @@ function RecordingPrompt({ isVisible, onStopRecording }: RecordingPromptProps) {
       </motion.div>
     )}
   </AnimatePresence>
-  )
+  );
 }

 interface RecordingControlsProps {
-  isRecording: boolean
-  isTranscribing: boolean
-  audioStream: MediaStream | null
-  textAreaHeight: number
-  onStopRecording: () => void
+  isRecording: boolean;
+  isTranscribing: boolean;
+  audioStream: MediaStream | null;
+  textAreaHeight: number;
+  onStopRecording: () => void;
 }

 function RecordingControls({
@@ -448,7 +449,7 @@ function RecordingControls({
         onClick={onStopRecording}
       />
     </div>
-    )
+    );
   }

   if (isTranscribing) {
@@ -459,8 +460,8 @@ function RecordingControls({
       >
         <TranscribingOverlay />
       </div>
-    )
+    );
   }

-  return null
+  return null;
 }
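Rather than casting through `any`, the transcription callback above builds a partial event and casts it to `React.ChangeEvent<HTMLTextAreaElement>`; only `target.value` is read downstream. A hedged sketch of the pattern, assuming a handler that touches nothing but `target.value`:

```ts
import type { ChangeEvent } from "react";

// Only `target.value` is populated; the cast documents that narrow contract.
function emitText(
  onChange: ((e: ChangeEvent<HTMLTextAreaElement>) => void) | undefined,
  text: string
) {
  onChange?.({
    target: { value: text },
  } as ChangeEvent<HTMLTextAreaElement>);
}

// emitText(e => console.log(e.target.value), "transcribed words");
```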
View file
@@ -2,18 +2,18 @@ import {
   ChatMessage,
   type ChatMessageProps,
   type Message,
-} from "@/components/chat-playground/chat-message"
-import { TypingIndicator } from "@/components/chat-playground/typing-indicator"
+} from "@/components/chat-playground/chat-message";
+import { TypingIndicator } from "@/components/chat-playground/typing-indicator";

-type AdditionalMessageOptions = Omit<ChatMessageProps, keyof Message>
+type AdditionalMessageOptions = Omit<ChatMessageProps, keyof Message>;

 interface MessageListProps {
-  messages: Message[]
-  showTimeStamps?: boolean
-  isTyping?: boolean
+  messages: Message[];
+  showTimeStamps?: boolean;
+  isTyping?: boolean;
   messageOptions?:
     | AdditionalMessageOptions
-    | ((message: Message) => AdditionalMessageOptions)
+    | ((message: Message) => AdditionalMessageOptions);
 }

 export function MessageList({
@@ -28,7 +28,7 @@ export function MessageList({
       const additionalOptions =
         typeof messageOptions === "function"
           ? messageOptions(message)
-          : messageOptions
+          : messageOptions;

       return (
         <ChatMessage
@@ -37,9 +37,9 @@ export function MessageList({
           {...message}
           {...additionalOptions}
         />
-      )
+      );
     })}
     {isTyping && <TypingIndicator />}
   </div>
-  )
+  );
 }
View file
@@ -1,7 +1,7 @@
 interface PromptSuggestionsProps {
-  label: string
-  append: (message: { role: "user"; content: string }) => void
-  suggestions: string[]
+  label: string;
+  append: (message: { role: "user"; content: string }) => void;
+  suggestions: string[];
 }

 export function PromptSuggestions({
@@ -13,7 +13,7 @@ export function PromptSuggestions({
     <div className="space-y-6">
       <h2 className="text-center text-2xl font-bold">{label}</h2>
       <div className="flex gap-6 text-sm">
-        {suggestions.map((suggestion) => (
+        {suggestions.map(suggestion => (
           <button
             key={suggestion}
             onClick={() => append({ role: "user", content: suggestion })}
@@ -24,5 +24,5 @@ export function PromptSuggestions({
         ))}
       </div>
     </div>
-  )
+  );
 }
View file
@@ -1,4 +1,4 @@
-import { Dot } from "lucide-react"
+import { Dot } from "lucide-react";

 export function TypingIndicator() {
   return (
@@ -11,5 +11,5 @@ export function TypingIndicator() {
       </div>
     </div>
   </div>
-  )
+  );
 }
View file
@@ -56,7 +56,8 @@ const manageItems = [
   },
 ];

-const optimizeItems: { title: string; url: string; icon: React.ElementType }[] = [
+const optimizeItems: { title: string; url: string; icon: React.ElementType }[] =
+  [
     {
       title: "Evaluations",
       url: "",
@@ -79,7 +80,7 @@ export function AppSidebar() {
   const pathname = usePathname();

   const renderSidebarItems = (items: SidebarItem[]) => {
-    return items.map((item) => {
+    return items.map(item => {
       const isActive = pathname.startsWith(item.url);
       return (
         <SidebarMenuItem key={item.title}>
@@ -88,14 +89,14 @@ export function AppSidebar() {
             className={cn(
               "justify-start",
               isActive &&
-                "bg-gray-200 dark:bg-gray-700 hover:bg-gray-200 dark:hover:bg-gray-700 text-gray-900 dark:text-gray-100",
+                "bg-gray-200 dark:bg-gray-700 hover:bg-gray-200 dark:hover:bg-gray-700 text-gray-900 dark:text-gray-100"
             )}
           >
             <Link href={item.url}>
               <item.icon
                 className={cn(
                   isActive && "text-gray-900 dark:text-gray-100",
-                  "mr-2 h-4 w-4",
+                  "mr-2 h-4 w-4"
                 )}
               />
               <span>{item.title}</span>
@@ -130,7 +131,7 @@ return (
             <SidebarGroupLabel>Optimize</SidebarGroupLabel>
             <SidebarGroupContent>
               <SidebarMenu>
-                {optimizeItems.map((item) => (
+                {optimizeItems.map(item => (
                   <SidebarMenuItem key={item.title}>
                     <SidebarMenuButton
                       disabled
@@ -138,7 +139,9 @@ return (
                     >
                       <item.icon className="mr-2 h-4 w-4" />
                       <span>{item.title}</span>
-                      <span className="ml-2 text-xs text-gray-500">(Coming Soon)</span>
+                      <span className="ml-2 text-xs text-gray-500">
+                        (Coming Soon)
+                      </span>
                     </SidebarMenuButton>
                   </SidebarMenuItem>
                 ))}
View file
@@ -2,7 +2,7 @@ import React from "react";
 import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
 import { Skeleton } from "@/components/ui/skeleton";

-export function DetailLoadingView({ title }: { title: string }) {
+export function DetailLoadingView() {
   return (
     <>
       <Skeleton className="h-8 w-3/4 mb-6" /> {/* Title Skeleton */}
View file
@@ -67,7 +67,7 @@ describe("LogsTable Viewport Loading", () => {
       () => {
         expect(mockLoadMore).toHaveBeenCalled();
       },
-      { timeout: 300 },
+      { timeout: 300 }
     );

     expect(mockLoadMore).toHaveBeenCalledTimes(1);
@@ -81,11 +81,11 @@ describe("LogsTable Viewport Loading", () => {
         {...defaultProps}
         status="loading-more"
         onLoadMore={mockLoadMore}
-      />,
+      />
     );

     // Wait for possible triggers
-    await new Promise((resolve) => setTimeout(resolve, 300));
+    await new Promise(resolve => setTimeout(resolve, 300));

     expect(mockLoadMore).not.toHaveBeenCalled();
   });
@@ -94,15 +94,11 @@ describe("LogsTable Viewport Loading", () => {
     const mockLoadMore = jest.fn();

     render(
-      <LogsTable
-        {...defaultProps}
-        status="loading"
-        onLoadMore={mockLoadMore}
-      />,
+      <LogsTable {...defaultProps} status="loading" onLoadMore={mockLoadMore} />
     );

     // Wait for possible triggers
-    await new Promise((resolve) => setTimeout(resolve, 300));
+    await new Promise(resolve => setTimeout(resolve, 300));

     expect(mockLoadMore).not.toHaveBeenCalled();
   });
@@ -111,18 +107,18 @@ describe("LogsTable Viewport Loading", () => {
     const mockLoadMore = jest.fn();

     render(
-      <LogsTable {...defaultProps} hasMore={false} onLoadMore={mockLoadMore} />,
+      <LogsTable {...defaultProps} hasMore={false} onLoadMore={mockLoadMore} />
     );

     // Wait for possible triggers
-    await new Promise((resolve) => setTimeout(resolve, 300));
+    await new Promise(resolve => setTimeout(resolve, 300));

     expect(mockLoadMore).not.toHaveBeenCalled();
   });

   test("sentinel element should not be rendered when loading", () => {
     const { container } = render(
-      <LogsTable {...defaultProps} status="loading-more" />,
+      <LogsTable {...defaultProps} status="loading-more" />
     );

     // Check that no sentinel row with height: 1 exists
@@ -132,7 +128,7 @@ describe("LogsTable Viewport Loading", () => {
   test("sentinel element should be rendered when not loading and hasMore", () => {
     const { container } = render(
-      <LogsTable {...defaultProps} hasMore={true} status="idle" />,
+      <LogsTable {...defaultProps} hasMore={true} status="idle" />
     );

     // Check that sentinel row exists
View file
@@ -70,7 +70,7 @@ describe("LogsTable", () => {
   describe("Loading State", () => {
     test("renders skeleton UI when isLoading is true", () => {
       const { container } = render(
-        <LogsTable {...defaultProps} status="loading" />,
+        <LogsTable {...defaultProps} status="loading" />
       );

       // Check for skeleton in the table caption
@@ -78,7 +78,7 @@ describe("LogsTable", () => {
       expect(tableCaption).toBeInTheDocument();
       if (tableCaption) {
         const captionSkeleton = tableCaption.querySelector(
-          '[data-slot="skeleton"]',
+          '[data-slot="skeleton"]'
         );
         expect(captionSkeleton).toBeInTheDocument();
       }
@@ -88,7 +88,7 @@ describe("LogsTable", () => {
       expect(tableBody).toBeInTheDocument();
       if (tableBody) {
         const bodySkeletons = tableBody.querySelectorAll(
-          '[data-slot="skeleton"]',
+          '[data-slot="skeleton"]'
         );
         expect(bodySkeletons.length).toBeGreaterThan(0);
       }
@@ -102,7 +102,7 @@ describe("LogsTable", () => {
     test("renders correct number of skeleton rows", () => {
       const { container } = render(
-        <LogsTable {...defaultProps} status="loading" />,
+        <LogsTable {...defaultProps} status="loading" />
       );

       const skeletonRows = container.querySelectorAll("tbody tr");
@@ -118,10 +118,10 @@ describe("LogsTable", () => {
           {...defaultProps}
           status="error"
           error={{ name: "Error", message: errorMessage } as Error}
-        />,
+        />
       );

       expect(
-        screen.getByText("Unable to load chat completions"),
+        screen.getByText("Unable to load chat completions")
       ).toBeInTheDocument();
       expect(screen.getByText(errorMessage)).toBeInTheDocument();
     });
@@ -132,29 +132,25 @@ describe("LogsTable", () => {
           {...defaultProps}
           status="error"
           error={{ name: "Error", message: "" } as Error}
-        />,
+        />
       );

       expect(
-        screen.getByText("Unable to load chat completions"),
+        screen.getByText("Unable to load chat completions")
       ).toBeInTheDocument();
       expect(
-        screen.getByText(
-          "An unexpected error occurred while loading the data.",
-        ),
+        screen.getByText("An unexpected error occurred while loading the data.")
       ).toBeInTheDocument();
     });

     test("renders default error message when error prop is an object without message", () => {
       render(
-        <LogsTable {...defaultProps} status="error" error={{} as Error} />,
+        <LogsTable {...defaultProps} status="error" error={{} as Error} />
       );

       expect(
-        screen.getByText("Unable to load chat completions"),
+        screen.getByText("Unable to load chat completions")
       ).toBeInTheDocument();
       expect(
-        screen.getByText(
-          "An unexpected error occurred while loading the data.",
-        ),
+        screen.getByText("An unexpected error occurred while loading the data.")
       ).toBeInTheDocument();
     });
@@ -164,7 +160,7 @@ describe("LogsTable", () => {
           {...defaultProps}
           status="error"
           error={{ name: "Error", message: "Test error" } as Error}
-        />,
+        />
       );

       const table = screen.queryByRole("table");
       expect(table).not.toBeInTheDocument();
@@ -178,7 +174,7 @@ describe("LogsTable", () => {
           {...defaultProps}
           data={[]}
           emptyMessage="Custom empty message"
-        />,
+        />
       );

       expect(screen.getByText("Custom empty message")).toBeInTheDocument();
@@ -214,7 +210,7 @@ describe("LogsTable", () => {
           {...defaultProps}
           data={mockData}
           caption="Custom table caption"
-        />,
+        />
       );

       // Table caption
@@ -311,8 +307,8 @@ describe("LogsTable", () => {
       // Verify truncated text is displayed
       const truncatedTexts = screen.getAllByText("This is a ...");
       expect(truncatedTexts).toHaveLength(2); // one for input, one for output
-      truncatedTexts.forEach((textElement) =>
-        expect(textElement).toBeInTheDocument(),
+      truncatedTexts.forEach(textElement =>
+        expect(textElement).toBeInTheDocument()
       );
     });
@@ -332,12 +328,12 @@ describe("LogsTable", () => {
       // Model name should not be passed to truncateText
       expect(truncateText).not.toHaveBeenCalledWith(
-        "very-long-model-name-that-should-not-be-truncated",
+        "very-long-model-name-that-should-not-be-truncated"
       );

       // Full model name should be displayed
       expect(
-        screen.getByText("very-long-model-name-that-should-not-be-truncated"),
+        screen.getByText("very-long-model-name-that-should-not-be-truncated")
       ).toBeInTheDocument();
     });
   });
View file
@@ -142,7 +142,7 @@ export function LogsTable({
       <Table>
         <TableCaption className="sr-only">{caption}</TableCaption>
         <TableBody>
-          {data.map((row) => (
+          {data.map(row => (
             <TableRow
               key={row.id}
               onClick={() => router.push(row.detailPath)}
View file
@@ -22,7 +22,7 @@ export function GroupedItemsDisplay({
   return (
     <>
-      {groupedItems.map((groupedItem) => {
+      {groupedItems.map(groupedItem => {
         // If this is a function call with an output, render the grouped component
         if (
           groupedItem.outputItem &&
View file
@@ -18,7 +18,7 @@ export interface GroupedItem {
  * @returns Array of grouped items with their outputs
  */
 export function useFunctionCallGrouping(
-  items: AnyResponseItem[],
+  items: AnyResponseItem[]
 ): GroupedItem[] {
   return useMemo(() => {
     const groupedItems: GroupedItem[] = [];
View file
@@ -52,7 +52,7 @@ export function ItemRenderer({
   // Fallback to generic item for unknown types
   return (
     <GenericItemComponent
-      item={item as any}
+      item={item as Record<string, unknown>}
       index={index}
       keyPrefix={keyPrefix}
     />
View file
@@ -20,7 +20,7 @@ export function MessageItemComponent({
     content = item.content;
   } else if (Array.isArray(item.content)) {
     content = item.content
-      .map((c) => {
+      .map(c => {
         return c.type === "input_text" || c.type === "output_text"
           ? c.text
           : JSON.stringify(c);
View file
@@ -18,7 +18,7 @@ describe("ResponseDetailView", () => {
   describe("Loading State", () => {
     test("renders loading skeleton when isLoading is true", () => {
       const { container } = render(
-        <ResponseDetailView {...defaultProps} isLoading={true} />,
+        <ResponseDetailView {...defaultProps} isLoading={true} />
      );

       // Check for skeleton elements
@@ -36,13 +36,13 @@ describe("ResponseDetailView", () => {
         <ResponseDetailView
           {...defaultProps}
           error={{ name: "Error", message: errorMessage }}
-        />,
+        />
       );

       expect(screen.getByText("Responses Details")).toBeInTheDocument();
       // The error message is split across elements, so we check for parts
       expect(
-        screen.getByText(/Error loading details for ID/),
+        screen.getByText(/Error loading details for ID/)
       ).toBeInTheDocument();
       expect(screen.getByText(/test_id/)).toBeInTheDocument();
       expect(screen.getByText(/Network Error/)).toBeInTheDocument();
@@ -53,11 +53,11 @@ describe("ResponseDetailView", () => {
         <ResponseDetailView
           {...defaultProps}
           error={{ name: "Error", message: "" }}
-        />,
+        />
       );

       expect(
-        screen.getByText(/Error loading details for ID/),
+        screen.getByText(/Error loading details for ID/)
       ).toBeInTheDocument();
       expect(screen.getByText(/test_id/)).toBeInTheDocument();
     });
@@ -124,14 +124,14 @@ describe("ResponseDetailView", () => {
       // Check properties - use regex to handle text split across elements
       expect(screen.getByText(/Created/)).toBeInTheDocument();
       expect(
-        screen.getByText(new Date(1710000000 * 1000).toLocaleString()),
+        screen.getByText(new Date(1710000000 * 1000).toLocaleString())
       ).toBeInTheDocument();

       // Check for the specific ID label (not Previous Response ID)
       expect(
         screen.getByText((content, element) => {
           return element?.tagName === "STRONG" && content === "ID:";
-        }),
+        })
       ).toBeInTheDocument();
       expect(screen.getByText("resp_123")).toBeInTheDocument();
@@ -166,7 +166,7 @@ describe("ResponseDetailView", () => {
       };

       render(
-        <ResponseDetailView {...defaultProps} response={minimalResponse} />,
+        <ResponseDetailView {...defaultProps} response={minimalResponse} />
       );

       // Should show required properties
@@ -179,7 +179,7 @@ describe("ResponseDetailView", () => {
       expect(screen.queryByText("Top P")).not.toBeInTheDocument();
       expect(screen.queryByText("Parallel Tool Calls")).not.toBeInTheDocument();
       expect(
-        screen.queryByText("Previous Response ID"),
+        screen.queryByText("Previous Response ID")
       ).not.toBeInTheDocument();
     });
@@ -196,7 +196,7 @@ describe("ResponseDetailView", () => {
       // The error is shown in the properties sidebar, not as a separate "Error" label
       expect(
-        screen.getByText("invalid_request: The request was invalid"),
+        screen.getByText("invalid_request: The request was invalid")
       ).toBeInTheDocument();
     });
   });
@@ -218,7 +218,7 @@ describe("ResponseDetailView", () => {
         {...defaultProps}
         response={mockResponse}
         isLoadingInputItems={true}
-      />,
+      />
     );

     // Check for skeleton loading in input items section
@ -227,7 +227,7 @@ describe("ResponseDetailView", () => {
{...defaultProps} {...defaultProps}
response={mockResponse} response={mockResponse}
isLoadingInputItems={true} isLoadingInputItems={true}
/>, />
); );
const skeletons = container.querySelectorAll('[data-slot="skeleton"]'); const skeletons = container.querySelectorAll('[data-slot="skeleton"]');
@ -243,16 +243,16 @@ describe("ResponseDetailView", () => {
name: "Error", name: "Error",
message: "Failed to load input items", message: "Failed to load input items",
}} }}
/>, />
); );
expect( expect(
screen.getByText( screen.getByText(
"Error loading input items: Failed to load input items", "Error loading input items: Failed to load input items"
), )
).toBeInTheDocument(); ).toBeInTheDocument();
expect( expect(
screen.getByText("Falling back to response input data."), screen.getByText("Falling back to response input data.")
).toBeInTheDocument(); ).toBeInTheDocument();
// Should still show fallback input data // Should still show fallback input data
@ -276,7 +276,7 @@ describe("ResponseDetailView", () => {
{...defaultProps} {...defaultProps}
response={mockResponse} response={mockResponse}
inputItems={mockInputItems} inputItems={mockInputItems}
/>, />
); );
// Should show input items data, not response.input // Should show input items data, not response.input
@ -295,7 +295,7 @@ describe("ResponseDetailView", () => {
{...defaultProps} {...defaultProps}
response={mockResponse} response={mockResponse}
inputItems={emptyInputItems} inputItems={emptyInputItems}
/>, />
); );
// Should show fallback input data // Should show fallback input data
@ -313,7 +313,7 @@ describe("ResponseDetailView", () => {
{...defaultProps} {...defaultProps}
response={responseWithoutInput} response={responseWithoutInput}
inputItems={null} inputItems={null}
/>, />
); );
expect(screen.getByText("No input data available.")).toBeInTheDocument(); expect(screen.getByText("No input data available.")).toBeInTheDocument();
@ -443,7 +443,7 @@ describe("ResponseDetailView", () => {
render(<ResponseDetailView {...defaultProps} response={mockResponse} />); render(<ResponseDetailView {...defaultProps} response={mockResponse} />);
expect( expect(
screen.getByText('input_function({"param": "value"})'), screen.getByText('input_function({"param": "value"})')
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText("Function Call")).toBeInTheDocument(); expect(screen.getByText("Function Call")).toBeInTheDocument();
}); });
@ -468,7 +468,7 @@ describe("ResponseDetailView", () => {
render(<ResponseDetailView {...defaultProps} response={mockResponse} />); render(<ResponseDetailView {...defaultProps} response={mockResponse} />);
expect( expect(
screen.getByText("web_search_call(status: completed)"), screen.getByText("web_search_call(status: completed)")
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText("Function Call")).toBeInTheDocument(); expect(screen.getByText("Function Call")).toBeInTheDocument();
expect(screen.getByText("(Web Search)")).toBeInTheDocument(); expect(screen.getByText("(Web Search)")).toBeInTheDocument();
@ -522,7 +522,7 @@ describe("ResponseDetailView", () => {
render(<ResponseDetailView {...defaultProps} response={mockResponse} />); render(<ResponseDetailView {...defaultProps} response={mockResponse} />);
expect( expect(
screen.getByText("First output Second output"), screen.getByText("First output Second output")
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText("Assistant")).toBeInTheDocument(); expect(screen.getByText("Assistant")).toBeInTheDocument();
}); });
@ -549,7 +549,7 @@ describe("ResponseDetailView", () => {
render(<ResponseDetailView {...defaultProps} response={mockResponse} />); render(<ResponseDetailView {...defaultProps} response={mockResponse} />);
expect( expect(
screen.getByText('search_function({"query": "test"})'), screen.getByText('search_function({"query": "test"})')
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText("Function Call")).toBeInTheDocument(); expect(screen.getByText("Function Call")).toBeInTheDocument();
}); });
@ -598,7 +598,7 @@ describe("ResponseDetailView", () => {
render(<ResponseDetailView {...defaultProps} response={mockResponse} />); render(<ResponseDetailView {...defaultProps} response={mockResponse} />);
expect( expect(
screen.getByText("web_search_call(status: completed)"), screen.getByText("web_search_call(status: completed)")
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText(/Function Call/)).toBeInTheDocument(); expect(screen.getByText(/Function Call/)).toBeInTheDocument();
expect(screen.getByText("(Web Search)")).toBeInTheDocument(); expect(screen.getByText("(Web Search)")).toBeInTheDocument();
@ -616,7 +616,7 @@ describe("ResponseDetailView", () => {
type: "unknown_type", type: "unknown_type",
custom_field: "custom_value", custom_field: "custom_value",
data: { nested: "object" }, data: { nested: "object" },
} as any, } as unknown,
], ],
input: [], input: [],
}; };
@ -625,7 +625,7 @@ describe("ResponseDetailView", () => {
// Should show JSON stringified content // Should show JSON stringified content
expect( expect(
screen.getByText(/custom_field.*custom_value/), screen.getByText(/custom_field.*custom_value/)
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText("(unknown_type)")).toBeInTheDocument(); expect(screen.getByText("(unknown_type)")).toBeInTheDocument();
}); });
@ -666,7 +666,7 @@ describe("ResponseDetailView", () => {
role: "assistant", role: "assistant",
call_id: "call_123", call_id: "call_123",
content: "sunny and warm", content: "sunny and warm",
} as any, // Using any to bypass the type restriction for this test } as unknown, // Using unknown to bypass the type restriction for this test
], ],
input: [], input: [],
}; };
@ -676,7 +676,7 @@ describe("ResponseDetailView", () => {
// Should show the function call and message as separate items (not grouped) // Should show the function call and message as separate items (not grouped)
expect(screen.getByText("Function Call")).toBeInTheDocument(); expect(screen.getByText("Function Call")).toBeInTheDocument();
expect( expect(
screen.getByText('get_weather({"city": "Tokyo"})'), screen.getByText('get_weather({"city": "Tokyo"})')
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText("Assistant")).toBeInTheDocument(); expect(screen.getByText("Assistant")).toBeInTheDocument();
expect(screen.getByText("sunny and warm")).toBeInTheDocument(); expect(screen.getByText("sunny and warm")).toBeInTheDocument();
@ -706,7 +706,7 @@ describe("ResponseDetailView", () => {
status: "completed", status: "completed",
call_id: "call_123", call_id: "call_123",
output: "sunny and warm", output: "sunny and warm",
} as any, // Using any to bypass the type restriction for this test } as unknown,
], ],
input: [], input: [],
}; };
@ -717,7 +717,7 @@ describe("ResponseDetailView", () => {
expect(screen.getByText("Function Call")).toBeInTheDocument(); expect(screen.getByText("Function Call")).toBeInTheDocument();
expect(screen.getByText("Arguments")).toBeInTheDocument(); expect(screen.getByText("Arguments")).toBeInTheDocument();
expect( expect(
screen.getByText('get_weather({"city": "Tokyo"})'), screen.getByText('get_weather({"city": "Tokyo"})')
).toBeInTheDocument(); ).toBeInTheDocument();
// Use getAllByText since there are multiple "Output" elements (card title and output label) // Use getAllByText since there are multiple "Output" elements (card title and output label)
const outputElements = screen.getAllByText("Output"); const outputElements = screen.getAllByText("Output");
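
Several assertions above match text that the DOM splits across elements, using either a regex or a function matcher. A self-contained illustration of the function-matcher form with a stand-in component (assumes the same jest + Testing Library + jest-dom setup these tests already use):

```tsx
import { render, screen } from "@testing-library/react";
import "@testing-library/jest-dom";

// Stand-in component: the label and value render as separate text nodes.
function Label() {
  return (
    <p>
      <strong>ID:</strong> resp_123
    </p>
  );
}

test("matches the STRONG label even though the text is split", () => {
  render(<Label />);
  expect(
    screen.getByText((content, element) => {
      return element?.tagName === "STRONG" && content === "ID:";
    })
  ).toBeInTheDocument();
});
```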
@ -146,7 +146,7 @@ describe("ResponsesTable", () => {
expect(tableCaption).toBeInTheDocument(); expect(tableCaption).toBeInTheDocument();
if (tableCaption) { if (tableCaption) {
const captionSkeleton = tableCaption.querySelector( const captionSkeleton = tableCaption.querySelector(
'[data-slot="skeleton"]', '[data-slot="skeleton"]'
); );
expect(captionSkeleton).toBeInTheDocument(); expect(captionSkeleton).toBeInTheDocument();
} }
@ -156,7 +156,7 @@ describe("ResponsesTable", () => {
expect(tableBody).toBeInTheDocument(); expect(tableBody).toBeInTheDocument();
if (tableBody) { if (tableBody) {
const bodySkeletons = tableBody.querySelectorAll( const bodySkeletons = tableBody.querySelectorAll(
'[data-slot="skeleton"]', '[data-slot="skeleton"]'
); );
expect(bodySkeletons.length).toBeGreaterThan(0); expect(bodySkeletons.length).toBeGreaterThan(0);
} }
@ -176,14 +176,14 @@ describe("ResponsesTable", () => {
render(<ResponsesTable {...defaultProps} />); render(<ResponsesTable {...defaultProps} />);
expect( expect(
screen.getByText("Unable to load chat completions"), screen.getByText("Unable to load chat completions")
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText(errorMessage)).toBeInTheDocument(); expect(screen.getByText(errorMessage)).toBeInTheDocument();
}); });
test.each([{ name: "Error", message: "" }, {}])( test.each([{ name: "Error", message: "" }, {}])(
"renders default error message when error has no message", "renders default error message when error has no message",
(errorObject) => { errorObject => {
mockedUsePagination.mockReturnValue({ mockedUsePagination.mockReturnValue({
data: [], data: [],
status: "error", status: "error",
@ -194,14 +194,14 @@ describe("ResponsesTable", () => {
render(<ResponsesTable {...defaultProps} />); render(<ResponsesTable {...defaultProps} />);
expect( expect(
screen.getByText("Unable to load chat completions"), screen.getByText("Unable to load chat completions")
).toBeInTheDocument(); ).toBeInTheDocument();
expect( expect(
screen.getByText( screen.getByText(
"An unexpected error occurred while loading the data.", "An unexpected error occurred while loading the data."
), )
).toBeInTheDocument(); ).toBeInTheDocument();
}, }
); );
}); });
@ -275,7 +275,7 @@ describe("ResponsesTable", () => {
// Table caption // Table caption
expect( expect(
screen.getByText("A list of your recent responses."), screen.getByText("A list of your recent responses.")
).toBeInTheDocument(); ).toBeInTheDocument();
// Table headers // Table headers
@ -289,14 +289,14 @@ describe("ResponsesTable", () => {
expect(screen.getByText("Test output")).toBeInTheDocument(); expect(screen.getByText("Test output")).toBeInTheDocument();
expect(screen.getByText("llama-test-model")).toBeInTheDocument(); expect(screen.getByText("llama-test-model")).toBeInTheDocument();
expect( expect(
screen.getByText(new Date(1710000000 * 1000).toLocaleString()), screen.getByText(new Date(1710000000 * 1000).toLocaleString())
).toBeInTheDocument(); ).toBeInTheDocument();
expect(screen.getByText("Another input")).toBeInTheDocument(); expect(screen.getByText("Another input")).toBeInTheDocument();
expect(screen.getByText("Another output")).toBeInTheDocument(); expect(screen.getByText("Another output")).toBeInTheDocument();
expect(screen.getByText("llama-another-model")).toBeInTheDocument(); expect(screen.getByText("llama-another-model")).toBeInTheDocument();
expect( expect(
screen.getByText(new Date(1710001000 * 1000).toLocaleString()), screen.getByText(new Date(1710001000 * 1000).toLocaleString())
).toBeInTheDocument(); ).toBeInTheDocument();
}); });
}); });
@ -487,7 +487,7 @@ describe("ResponsesTable", () => {
render(<ResponsesTable {...defaultProps} />); render(<ResponsesTable {...defaultProps} />);
expect( expect(
screen.getByText('search_function({"query": "test"})'), screen.getByText('search_function({"query": "test"})')
).toBeInTheDocument(); ).toBeInTheDocument();
}); });
@ -548,7 +548,7 @@ describe("ResponsesTable", () => {
render(<ResponsesTable {...defaultProps} />); render(<ResponsesTable {...defaultProps} />);
expect( expect(
screen.getByText("web_search_call(status: completed)"), screen.getByText("web_search_call(status: completed)")
).toBeInTheDocument(); ).toBeInTheDocument();
}); });
@ -565,7 +565,7 @@ describe("ResponsesTable", () => {
id: "unknown_123", id: "unknown_123",
status: "completed", status: "completed",
custom_field: "custom_value", custom_field: "custom_value",
} as any, } as unknown,
], ],
input: [{ type: "message", content: "input" }], input: [{ type: "message", content: "input" }],
}; };
@ -594,7 +594,7 @@ describe("ResponsesTable", () => {
{ {
type: "unknown_type", type: "unknown_type",
data: "some data", data: "some data",
} as any, } as unknown,
], ],
input: [{ type: "message", content: "input" }], input: [{ type: "message", content: "input" }],
}; };
@ -623,7 +623,7 @@ describe("ResponsesTable", () => {
return typeof text === "string" && text.length > effectiveMaxLength return typeof text === "string" && text.length > effectiveMaxLength
? text.slice(0, effectiveMaxLength) + "..." ? text.slice(0, effectiveMaxLength) + "..."
: text; : text;
}, }
); );
const longInput = const longInput =
@ -665,7 +665,7 @@ describe("ResponsesTable", () => {
// The truncated text should be present for both input and output // The truncated text should be present for both input and output
const truncatedTexts = screen.getAllByText( const truncatedTexts = screen.getAllByText(
longInput.slice(0, 10) + "...", longInput.slice(0, 10) + "..."
); );
expect(truncatedTexts.length).toBe(2); // one for input, one for output expect(truncatedTexts.length).toBe(2); // one for input, one for output
}); });
@ -27,7 +27,7 @@ interface ResponsesTableProps {
* Helper function to convert ResponseListResponse.Data to OpenAIResponse * Helper function to convert ResponseListResponse.Data to OpenAIResponse
*/ */
const convertResponseListData = ( const convertResponseListData = (
responseData: ResponseListResponse.Data, responseData: ResponseListResponse.Data
): OpenAIResponse => { ): OpenAIResponse => {
return { return {
id: responseData.id, id: responseData.id,
@ -56,8 +56,8 @@ function getInputText(response: OpenAIResponse): string {
} }
function getOutputText(response: OpenAIResponse): string { function getOutputText(response: OpenAIResponse): string {
const firstMessage = response.output.find((item) => const firstMessage = response.output.find(item =>
isMessageItem(item as any), isMessageItem(item as Record<string, unknown>)
); );
if (firstMessage) { if (firstMessage) {
const content = extractContentFromItem(firstMessage as MessageItem); const content = extractContentFromItem(firstMessage as MessageItem);
@ -66,15 +66,15 @@ function getOutputText(response: OpenAIResponse): string {
} }
} }
const functionCall = response.output.find((item) => const functionCall = response.output.find(item =>
isFunctionCallItem(item as any), isFunctionCallItem(item as Record<string, unknown>)
); );
if (functionCall) { if (functionCall) {
return formatFunctionCall(functionCall as FunctionCallItem); return formatFunctionCall(functionCall as FunctionCallItem);
} }
const webSearchCall = response.output.find((item) => const webSearchCall = response.output.find(item =>
isWebSearchCallItem(item as any), isWebSearchCallItem(item as Record<string, unknown>)
); );
if (webSearchCall) { if (webSearchCall) {
return formatWebSearchCall(webSearchCall as WebSearchCallItem); return formatWebSearchCall(webSearchCall as WebSearchCallItem);
@ -95,7 +95,7 @@ function extractContentFromItem(item: {
} else if (Array.isArray(item.content)) { } else if (Array.isArray(item.content)) {
const textContent = item.content.find( const textContent = item.content.find(
(c: ResponseInputMessageContent) => (c: ResponseInputMessageContent) =>
c.type === "input_text" || c.type === "output_text", c.type === "input_text" || c.type === "output_text"
); );
return textContent?.text || ""; return textContent?.text || "";
} }
@ -131,14 +131,14 @@ export function ResponsesTable({ paginationOptions }: ResponsesTableProps) {
limit: number; limit: number;
model?: string; model?: string;
order?: string; order?: string;
}, }
) => { ) => {
const response = await client.responses.list({ const response = await client.responses.list({
after: params.after, after: params.after,
limit: params.limit, limit: params.limit,
...(params.model && { model: params.model }), ...(params.model && { model: params.model }),
...(params.order && { order: params.order }), ...(params.order && { order: params.order }),
} as any); } as Parameters<typeof client.responses.list>[0]);
const listResponse = response as ResponseListResponse; const listResponse = response as ResponseListResponse;
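
The `as Parameters<typeof client.responses.list>[0]` cast above derives the request-parameter type from the SDK method itself instead of widening to `any`. A minimal sketch of the utility-type pattern with a stand-in function (the signature is an assumption for illustration):

```ts
// Stand-in for an SDK method; the parameter shape is assumed.
function listResponses(params: {
  after?: string;
  limit: number;
  order?: "asc" | "desc";
}) {
  return { data: [] as string[], params };
}

// Parameters<typeof fn> is the tuple of fn's argument types; [0] is the first.
type ListParams = Parameters<typeof listResponses>[0];

// The compiler now checks the literal against the function's own signature.
const params: ListParams = { limit: 20, order: "desc" };
listResponses(params);
```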
@ -29,7 +29,7 @@ export type AnyResponseItem =
| FunctionCallOutputItem; | FunctionCallOutputItem;
export function isMessageInput( export function isMessageInput(
item: ResponseInput, item: ResponseInput
): item is ResponseInput & { type: "message" } { ): item is ResponseInput & { type: "message" } {
return item.type === "message"; return item.type === "message";
} }
@ -39,23 +39,23 @@ export function isMessageItem(item: AnyResponseItem): item is MessageItem {
} }
export function isFunctionCallItem( export function isFunctionCallItem(
item: AnyResponseItem, item: AnyResponseItem
): item is FunctionCallItem { ): item is FunctionCallItem {
return item.type === "function_call" && "name" in item; return item.type === "function_call" && "name" in item;
} }
export function isWebSearchCallItem( export function isWebSearchCallItem(
item: AnyResponseItem, item: AnyResponseItem
): item is WebSearchCallItem { ): item is WebSearchCallItem {
return item.type === "web_search_call"; return item.type === "web_search_call";
} }
export function isFunctionCallOutputItem( export function isFunctionCallOutputItem(
item: AnyResponseItem, item: AnyResponseItem
): item is FunctionCallOutputItem { ): item is FunctionCallOutputItem {
return ( return (
item.type === "function_call_output" && item.type === "function_call_output" &&
"call_id" in item && "call_id" in item &&
typeof (item as any).call_id === "string" typeof (item as Record<string, unknown>).call_id === "string"
); );
} }
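
These guards read properties through `Record<string, unknown>` rather than `any`, so the checks stay type-checked and the caller gets a narrowed type. A standalone sketch of the same narrowing pattern (the FunctionCallOutput shape is assumed for illustration):

```ts
// Sketch only: the item shape is an assumption.
interface FunctionCallOutput {
  type: "function_call_output";
  call_id: string;
  output: string;
}

function isFunctionCallOutput(item: unknown): item is FunctionCallOutput {
  if (typeof item !== "object" || item === null) return false;
  // Record<string, unknown> permits property reads without an `any` escape hatch.
  const record = item as Record<string, unknown>;
  return (
    record.type === "function_call_output" && typeof record.call_id === "string"
  );
}

const raw: unknown = JSON.parse(
  '{"type":"function_call_output","call_id":"call_123","output":"ok"}'
);
if (isFunctionCallOutput(raw)) {
  console.log(raw.call_id, raw.output); // fully typed here, no `any` needed
}
```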
@ -1,6 +1,6 @@
"use client" "use client";
import { useEffect, useRef } from "react" import { useEffect, useRef } from "react";
// Configuration constants for the audio analyzer // Configuration constants for the audio analyzer
const AUDIO_CONFIG = { const AUDIO_CONFIG = {
@ -14,12 +14,12 @@ const AUDIO_CONFIG = {
MAX_INTENSITY: 255, // Maximum gray value (brighter) MAX_INTENSITY: 255, // Maximum gray value (brighter)
INTENSITY_RANGE: 155, // MAX_INTENSITY - MIN_INTENSITY INTENSITY_RANGE: 155, // MAX_INTENSITY - MIN_INTENSITY
}, },
} as const } as const;
interface AudioVisualizerProps { interface AudioVisualizerProps {
stream: MediaStream | null stream: MediaStream | null;
isRecording: boolean isRecording: boolean;
onClick: () => void onClick: () => void;
} }
export function AudioVisualizer({ export function AudioVisualizer({
@ -28,91 +28,91 @@ export function AudioVisualizer({
onClick, onClick,
}: AudioVisualizerProps) { }: AudioVisualizerProps) {
// Refs for managing audio context and animation // Refs for managing audio context and animation
const canvasRef = useRef<HTMLCanvasElement>(null) const canvasRef = useRef<HTMLCanvasElement>(null);
const audioContextRef = useRef<AudioContext | null>(null) const audioContextRef = useRef<AudioContext | null>(null);
const analyserRef = useRef<AnalyserNode | null>(null) const analyserRef = useRef<AnalyserNode | null>(null);
const animationFrameRef = useRef<number>() const animationFrameRef = useRef<number>();
const containerRef = useRef<HTMLDivElement>(null) const containerRef = useRef<HTMLDivElement>(null);
// Cleanup function to stop visualization and close audio context // Cleanup function to stop visualization and close audio context
const cleanup = () => { const cleanup = () => {
if (animationFrameRef.current) { if (animationFrameRef.current) {
cancelAnimationFrame(animationFrameRef.current) cancelAnimationFrame(animationFrameRef.current);
} }
if (audioContextRef.current) { if (audioContextRef.current) {
audioContextRef.current.close() audioContextRef.current.close();
}
} }
};
// Cleanup on unmount // Cleanup on unmount
useEffect(() => { useEffect(() => {
return cleanup return cleanup;
}, []) }, []);
// Start or stop visualization based on recording state // Start or stop visualization based on recording state
useEffect(() => { useEffect(() => {
if (stream && isRecording) { if (stream && isRecording) {
startVisualization() startVisualization();
} else { } else {
cleanup() cleanup();
} }
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, [stream, isRecording]) }, [stream, isRecording]);
// Handle window resize // Handle window resize
useEffect(() => { useEffect(() => {
const handleResize = () => { const handleResize = () => {
if (canvasRef.current && containerRef.current) { if (canvasRef.current && containerRef.current) {
const container = containerRef.current const container = containerRef.current;
const canvas = canvasRef.current const canvas = canvasRef.current;
const dpr = window.devicePixelRatio || 1 const dpr = window.devicePixelRatio || 1;
// Set canvas size based on container and device pixel ratio // Set canvas size based on container and device pixel ratio
const rect = container.getBoundingClientRect() const rect = container.getBoundingClientRect();
// Account for the 2px total margin (1px on each side) // Account for the 2px total margin (1px on each side)
canvas.width = (rect.width - 2) * dpr canvas.width = (rect.width - 2) * dpr;
canvas.height = (rect.height - 2) * dpr canvas.height = (rect.height - 2) * dpr;
// Scale canvas CSS size to match container minus margins // Scale canvas CSS size to match container minus margins
canvas.style.width = `${rect.width - 2}px` canvas.style.width = `${rect.width - 2}px`;
canvas.style.height = `${rect.height - 2}px` canvas.style.height = `${rect.height - 2}px`;
}
} }
};
window.addEventListener("resize", handleResize) window.addEventListener("resize", handleResize);
// Initial setup // Initial setup
handleResize() handleResize();
return () => window.removeEventListener("resize", handleResize) return () => window.removeEventListener("resize", handleResize);
}, []) }, []);
// Initialize audio context and start visualization // Initialize audio context and start visualization
const startVisualization = async () => { const startVisualization = async () => {
try { try {
const audioContext = new AudioContext() const audioContext = new AudioContext();
audioContextRef.current = audioContext audioContextRef.current = audioContext;
const analyser = audioContext.createAnalyser() const analyser = audioContext.createAnalyser();
analyser.fftSize = AUDIO_CONFIG.FFT_SIZE analyser.fftSize = AUDIO_CONFIG.FFT_SIZE;
analyser.smoothingTimeConstant = AUDIO_CONFIG.SMOOTHING analyser.smoothingTimeConstant = AUDIO_CONFIG.SMOOTHING;
analyserRef.current = analyser analyserRef.current = analyser;
const source = audioContext.createMediaStreamSource(stream!) const source = audioContext.createMediaStreamSource(stream!);
source.connect(analyser) source.connect(analyser);
draw() draw();
} catch (error) { } catch (error) {
console.error("Error starting visualization:", error) console.error("Error starting visualization:", error);
}
} }
};
// Calculate the color intensity based on bar height // Calculate the color intensity based on bar height
const getBarColor = (normalizedHeight: number) => { const getBarColor = (normalizedHeight: number) => {
const intensity = const intensity =
Math.floor(normalizedHeight * AUDIO_CONFIG.COLOR.INTENSITY_RANGE) + Math.floor(normalizedHeight * AUDIO_CONFIG.COLOR.INTENSITY_RANGE) +
AUDIO_CONFIG.COLOR.MIN_INTENSITY AUDIO_CONFIG.COLOR.MIN_INTENSITY;
return `rgb(${intensity}, ${intensity}, ${intensity})` return `rgb(${intensity}, ${intensity}, ${intensity})`;
} };
// Draw a single bar of the visualizer // Draw a single bar of the visualizer
const drawBar = ( const drawBar = (
@ -123,52 +123,52 @@ export function AudioVisualizer({
height: number, height: number,
color: string color: string
) => { ) => {
ctx.fillStyle = color ctx.fillStyle = color;
// Draw upper bar (above center) // Draw upper bar (above center)
ctx.fillRect(x, centerY - height, width, height) ctx.fillRect(x, centerY - height, width, height);
// Draw lower bar (below center) // Draw lower bar (below center)
ctx.fillRect(x, centerY, width, height) ctx.fillRect(x, centerY, width, height);
} };
// Main drawing function // Main drawing function
const draw = () => { const draw = () => {
if (!isRecording) return if (!isRecording) return;
const canvas = canvasRef.current const canvas = canvasRef.current;
const ctx = canvas?.getContext("2d") const ctx = canvas?.getContext("2d");
if (!canvas || !ctx || !analyserRef.current) return if (!canvas || !ctx || !analyserRef.current) return;
const dpr = window.devicePixelRatio || 1 const dpr = window.devicePixelRatio || 1;
ctx.scale(dpr, dpr) ctx.scale(dpr, dpr);
const analyser = analyserRef.current const analyser = analyserRef.current;
const bufferLength = analyser.frequencyBinCount const bufferLength = analyser.frequencyBinCount;
const frequencyData = new Uint8Array(bufferLength) const frequencyData = new Uint8Array(bufferLength);
const drawFrame = () => { const drawFrame = () => {
animationFrameRef.current = requestAnimationFrame(drawFrame) animationFrameRef.current = requestAnimationFrame(drawFrame);
// Get current frequency data // Get current frequency data
analyser.getByteFrequencyData(frequencyData) analyser.getByteFrequencyData(frequencyData);
// Clear canvas - use CSS pixels for clearing // Clear canvas - use CSS pixels for clearing
ctx.clearRect(0, 0, canvas.width / dpr, canvas.height / dpr) ctx.clearRect(0, 0, canvas.width / dpr, canvas.height / dpr);
// Calculate dimensions in CSS pixels // Calculate dimensions in CSS pixels
const barWidth = Math.max( const barWidth = Math.max(
AUDIO_CONFIG.MIN_BAR_WIDTH, AUDIO_CONFIG.MIN_BAR_WIDTH,
canvas.width / dpr / bufferLength - AUDIO_CONFIG.BAR_SPACING canvas.width / dpr / bufferLength - AUDIO_CONFIG.BAR_SPACING
) );
const centerY = canvas.height / dpr / 2 const centerY = canvas.height / dpr / 2;
let x = 0 let x = 0;
// Draw each frequency bar // Draw each frequency bar
for (let i = 0; i < bufferLength; i++) { for (let i = 0; i < bufferLength; i++) {
const normalizedHeight = frequencyData[i] / 255 // Convert to 0-1 range const normalizedHeight = frequencyData[i] / 255; // Convert to 0-1 range
const barHeight = Math.max( const barHeight = Math.max(
AUDIO_CONFIG.MIN_BAR_HEIGHT, AUDIO_CONFIG.MIN_BAR_HEIGHT,
normalizedHeight * centerY normalizedHeight * centerY
) );
drawBar( drawBar(
ctx, ctx,
@ -177,14 +177,14 @@ export function AudioVisualizer({
barWidth, barWidth,
barHeight, barHeight,
getBarColor(normalizedHeight) getBarColor(normalizedHeight)
) );
x += barWidth + AUDIO_CONFIG.BAR_SPACING x += barWidth + AUDIO_CONFIG.BAR_SPACING;
}
} }
};
drawFrame() drawFrame();
} };
return ( return (
<div <div
@ -194,5 +194,5 @@ export function AudioVisualizer({
> >
<canvas ref={canvasRef} className="h-full w-full" /> <canvas ref={canvasRef} className="h-full w-full" />
</div> </div>
) );
} }
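
The purely mechanical edits across these UI files (semicolons added, parentheses dropped from single-parameter arrows, trailing commas removed after the last call argument) are consistent with a Prettier configuration along the following lines. This is a reconstruction inferred from the hunks, not a file shown in this diff:

```ts
// Hypothetical Prettier options implied by the formatting deltas above.
const prettierOptions = {
  semi: true, // `"use client"` -> `"use client";`
  arrowParens: "avoid" as const, // `(c) => ...` -> `c => ...`
  trailingComma: "es5" as const, // `cn("...", className,)` -> `cn("...", className)`
};

export default prettierOptions;
```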
@ -14,7 +14,7 @@ function BreadcrumbList({ className, ...props }: React.ComponentProps<"ol">) {
data-slot="breadcrumb-list" data-slot="breadcrumb-list"
className={cn( className={cn(
"text-muted-foreground flex flex-wrap items-center gap-1.5 text-sm break-words sm:gap-2.5", "text-muted-foreground flex flex-wrap items-center gap-1.5 text-sm break-words sm:gap-2.5",
className, className
)} )}
{...props} {...props}
/> />
@ -1,8 +1,8 @@
import * as React from "react" import * as React from "react";
import { Slot } from "@radix-ui/react-slot" import { Slot } from "@radix-ui/react-slot";
import { cva, type VariantProps } from "class-variance-authority" import { cva, type VariantProps } from "class-variance-authority";
import { cn } from "@/lib/utils" import { cn } from "@/lib/utils";
const buttonVariants = cva( const buttonVariants = cva(
"inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-all disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg:not([class*='size-'])]:size-4 shrink-0 [&_svg]:shrink-0 outline-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive", "inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-all disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg:not([class*='size-'])]:size-4 shrink-0 [&_svg]:shrink-0 outline-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive",
@ -33,7 +33,7 @@ const buttonVariants = cva(
size: "default", size: "default",
}, },
} }
) );
function Button({ function Button({
className, className,
@ -43,9 +43,9 @@ function Button({
...props ...props
}: React.ComponentProps<"button"> & }: React.ComponentProps<"button"> &
VariantProps<typeof buttonVariants> & { VariantProps<typeof buttonVariants> & {
asChild?: boolean asChild?: boolean;
}) { }) {
const Comp = asChild ? Slot : "button" const Comp = asChild ? Slot : "button";
return ( return (
<Comp <Comp
@ -53,7 +53,7 @@ function Button({
className={cn(buttonVariants({ variant, size, className }))} className={cn(buttonVariants({ variant, size, className }))}
{...props} {...props}
/> />
) );
} }
export { Button, buttonVariants } export { Button, buttonVariants };
@ -8,7 +8,7 @@ function Card({ className, ...props }: React.ComponentProps<"div">) {
data-slot="card" data-slot="card"
className={cn( className={cn(
"bg-card text-card-foreground flex flex-col gap-6 rounded-xl border py-6 shadow-sm", "bg-card text-card-foreground flex flex-col gap-6 rounded-xl border py-6 shadow-sm",
className, className
)} )}
{...props} {...props}
/> />
@ -21,7 +21,7 @@ function CardHeader({ className, ...props }: React.ComponentProps<"div">) {
data-slot="card-header" data-slot="card-header"
className={cn( className={cn(
"@container/card-header grid auto-rows-min grid-rows-[auto_auto] items-start gap-1.5 px-6 has-data-[slot=card-action]:grid-cols-[1fr_auto] [.border-b]:pb-6", "@container/card-header grid auto-rows-min grid-rows-[auto_auto] items-start gap-1.5 px-6 has-data-[slot=card-action]:grid-cols-[1fr_auto] [.border-b]:pb-6",
className, className
)} )}
{...props} {...props}
/> />
@ -54,7 +54,7 @@ function CardAction({ className, ...props }: React.ComponentProps<"div">) {
data-slot="card-action" data-slot="card-action"
className={cn( className={cn(
"col-start-2 row-span-2 row-start-1 self-start justify-self-end", "col-start-2 row-span-2 row-start-1 self-start justify-self-end",
className, className
)} )}
{...props} {...props}
/> />
@ -1,11 +1,11 @@
"use client" "use client";
import * as CollapsiblePrimitive from "@radix-ui/react-collapsible" import * as CollapsiblePrimitive from "@radix-ui/react-collapsible";
function Collapsible({ function Collapsible({
...props ...props
}: React.ComponentProps<typeof CollapsiblePrimitive.Root>) { }: React.ComponentProps<typeof CollapsiblePrimitive.Root>) {
return <CollapsiblePrimitive.Root data-slot="collapsible" {...props} /> return <CollapsiblePrimitive.Root data-slot="collapsible" {...props} />;
} }
function CollapsibleTrigger({ function CollapsibleTrigger({
@ -16,7 +16,7 @@ function CollapsibleTrigger({
data-slot="collapsible-trigger" data-slot="collapsible-trigger"
{...props} {...props}
/> />
) );
} }
function CollapsibleContent({ function CollapsibleContent({
@ -27,7 +27,7 @@ function CollapsibleContent({
data-slot="collapsible-content" data-slot="collapsible-content"
{...props} {...props}
/> />
) );
} }
export { Collapsible, CollapsibleTrigger, CollapsibleContent } export { Collapsible, CollapsibleTrigger, CollapsibleContent };
@ -1,21 +1,21 @@
"use client" "use client";
import { Check, Copy } from "lucide-react" import { Check, Copy } from "lucide-react";
import { cn } from "@/lib/utils" import { cn } from "@/lib/utils";
import { useCopyToClipboard } from "@/hooks/use-copy-to-clipboard" import { useCopyToClipboard } from "@/hooks/use-copy-to-clipboard";
import { Button } from "@/components/ui/button" import { Button } from "@/components/ui/button";
type CopyButtonProps = { type CopyButtonProps = {
content: string content: string;
copyMessage?: string copyMessage?: string;
} };
export function CopyButton({ content, copyMessage }: CopyButtonProps) { export function CopyButton({ content, copyMessage }: CopyButtonProps) {
const { isCopied, handleCopy } = useCopyToClipboard({ const { isCopied, handleCopy } = useCopyToClipboard({
text: content, text: content,
copyMessage, copyMessage,
}) });
return ( return (
<Button <Button
@ -40,5 +40,5 @@ export function CopyButton({ content, copyMessage }: CopyButtonProps) {
)} )}
/> />
</Button> </Button>
) );
} }
@ -43,7 +43,7 @@ function DropdownMenuContent({
sideOffset={sideOffset} sideOffset={sideOffset}
className={cn( className={cn(
"bg-popover text-popover-foreground data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 max-h-(--radix-dropdown-menu-content-available-height) min-w-[8rem] origin-(--radix-dropdown-menu-content-transform-origin) overflow-x-hidden overflow-y-auto rounded-md border p-1 shadow-md", "bg-popover text-popover-foreground data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 max-h-(--radix-dropdown-menu-content-available-height) min-w-[8rem] origin-(--radix-dropdown-menu-content-transform-origin) overflow-x-hidden overflow-y-auto rounded-md border p-1 shadow-md",
className, className
)} )}
{...props} {...props}
/> />
@ -75,7 +75,7 @@ function DropdownMenuItem({
data-variant={variant} data-variant={variant}
className={cn( className={cn(
"focus:bg-accent focus:text-accent-foreground data-[variant=destructive]:text-destructive data-[variant=destructive]:focus:bg-destructive/10 dark:data-[variant=destructive]:focus:bg-destructive/20 data-[variant=destructive]:focus:text-destructive data-[variant=destructive]:*:[svg]:!text-destructive [&_svg:not([class*='text-'])]:text-muted-foreground relative flex cursor-default items-center gap-2 rounded-sm px-2 py-1.5 text-sm outline-hidden select-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50 data-[inset]:pl-8 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4", "focus:bg-accent focus:text-accent-foreground data-[variant=destructive]:text-destructive data-[variant=destructive]:focus:bg-destructive/10 dark:data-[variant=destructive]:focus:bg-destructive/20 data-[variant=destructive]:focus:text-destructive data-[variant=destructive]:*:[svg]:!text-destructive [&_svg:not([class*='text-'])]:text-muted-foreground relative flex cursor-default items-center gap-2 rounded-sm px-2 py-1.5 text-sm outline-hidden select-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50 data-[inset]:pl-8 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
className, className
)} )}
{...props} {...props}
/> />
@ -93,7 +93,7 @@ function DropdownMenuCheckboxItem({
data-slot="dropdown-menu-checkbox-item" data-slot="dropdown-menu-checkbox-item"
className={cn( className={cn(
"focus:bg-accent focus:text-accent-foreground relative flex cursor-default items-center gap-2 rounded-sm py-1.5 pr-2 pl-8 text-sm outline-hidden select-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4", "focus:bg-accent focus:text-accent-foreground relative flex cursor-default items-center gap-2 rounded-sm py-1.5 pr-2 pl-8 text-sm outline-hidden select-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
className, className
)} )}
checked={checked} checked={checked}
{...props} {...props}
@ -129,7 +129,7 @@ function DropdownMenuRadioItem({
data-slot="dropdown-menu-radio-item" data-slot="dropdown-menu-radio-item"
className={cn( className={cn(
"focus:bg-accent focus:text-accent-foreground relative flex cursor-default items-center gap-2 rounded-sm py-1.5 pr-2 pl-8 text-sm outline-hidden select-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4", "focus:bg-accent focus:text-accent-foreground relative flex cursor-default items-center gap-2 rounded-sm py-1.5 pr-2 pl-8 text-sm outline-hidden select-none data-[disabled]:pointer-events-none data-[disabled]:opacity-50 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
className, className
)} )}
{...props} {...props}
> >
@ -156,7 +156,7 @@ function DropdownMenuLabel({
data-inset={inset} data-inset={inset}
className={cn( className={cn(
"px-2 py-1.5 text-sm font-medium data-[inset]:pl-8", "px-2 py-1.5 text-sm font-medium data-[inset]:pl-8",
className, className
)} )}
{...props} {...props}
/> />
@ -185,7 +185,7 @@ function DropdownMenuShortcut({
data-slot="dropdown-menu-shortcut" data-slot="dropdown-menu-shortcut"
className={cn( className={cn(
"text-muted-foreground ml-auto text-xs tracking-widest", "text-muted-foreground ml-auto text-xs tracking-widest",
className, className
)} )}
{...props} {...props}
/> />
@ -212,7 +212,7 @@ function DropdownMenuSubTrigger({
data-inset={inset} data-inset={inset}
className={cn( className={cn(
"focus:bg-accent focus:text-accent-foreground data-[state=open]:bg-accent data-[state=open]:text-accent-foreground flex cursor-default items-center rounded-sm px-2 py-1.5 text-sm outline-hidden select-none data-[inset]:pl-8", "focus:bg-accent focus:text-accent-foreground data-[state=open]:bg-accent data-[state=open]:text-accent-foreground flex cursor-default items-center rounded-sm px-2 py-1.5 text-sm outline-hidden select-none data-[inset]:pl-8",
className, className
)} )}
{...props} {...props}
> >
@ -231,7 +231,7 @@ function DropdownMenuSubContent({
data-slot="dropdown-menu-sub-content" data-slot="dropdown-menu-sub-content"
className={cn( className={cn(
"bg-popover text-popover-foreground data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 min-w-[8rem] origin-(--radix-dropdown-menu-content-transform-origin) overflow-hidden rounded-md border p-1 shadow-lg", "bg-popover text-popover-foreground data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 min-w-[8rem] origin-(--radix-dropdown-menu-content-transform-origin) overflow-hidden rounded-md border p-1 shadow-lg",
className, className
)} )}
{...props} {...props}
/> />
@ -1,18 +1,18 @@
"use client" "use client";
import React, { useEffect } from "react" import React, { useEffect } from "react";
import { motion } from "framer-motion" import { motion } from "framer-motion";
import { FileIcon, X } from "lucide-react" import { FileIcon, X } from "lucide-react";
interface FilePreviewProps { interface FilePreviewProps {
file: File file: File;
onRemove?: () => void onRemove?: () => void;
} }
export const FilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>( export const FilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>(
(props, ref) => { (props, ref) => {
if (props.file.type.startsWith("image/")) { if (props.file.type.startsWith("image/")) {
return <ImageFilePreview {...props} ref={ref} /> return <ImageFilePreview {...props} ref={ref} />;
} }
if ( if (
@ -20,13 +20,13 @@ export const FilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>(
props.file.name.endsWith(".txt") || props.file.name.endsWith(".txt") ||
props.file.name.endsWith(".md") props.file.name.endsWith(".md")
) { ) {
return <TextFilePreview {...props} ref={ref} /> return <TextFilePreview {...props} ref={ref} />;
} }
return <GenericFilePreview {...props} ref={ref} /> return <GenericFilePreview {...props} ref={ref} />;
} }
) );
FilePreview.displayName = "FilePreview" FilePreview.displayName = "FilePreview";
const ImageFilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>( const ImageFilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>(
({ file, onRemove }, ref) => { ({ file, onRemove }, ref) => {
@ -62,23 +62,23 @@ const ImageFilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>(
</button> </button>
) : null} ) : null}
</motion.div> </motion.div>
) );
} }
) );
ImageFilePreview.displayName = "ImageFilePreview" ImageFilePreview.displayName = "ImageFilePreview";
const TextFilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>( const TextFilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>(
({ file, onRemove }, ref) => { ({ file, onRemove }, ref) => {
const [preview, setPreview] = React.useState<string>("") const [preview, setPreview] = React.useState<string>("");
useEffect(() => { useEffect(() => {
const reader = new FileReader() const reader = new FileReader();
reader.onload = (e) => { reader.onload = e => {
const text = e.target?.result as string const text = e.target?.result as string;
setPreview(text.slice(0, 50) + (text.length > 50 ? "..." : "")) setPreview(text.slice(0, 50) + (text.length > 50 ? "..." : ""));
} };
reader.readAsText(file) reader.readAsText(file);
}, [file]) }, [file]);
return ( return (
<motion.div <motion.div
@ -111,10 +111,10 @@ const TextFilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>(
</button> </button>
) : null} ) : null}
</motion.div> </motion.div>
) );
} }
) );
TextFilePreview.displayName = "TextFilePreview" TextFilePreview.displayName = "TextFilePreview";
const GenericFilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>( const GenericFilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>(
({ file, onRemove }, ref) => { ({ file, onRemove }, ref) => {
@ -147,7 +147,7 @@ const GenericFilePreview = React.forwardRef<HTMLDivElement, FilePreviewProps>(
</button> </button>
) : null} ) : null}
</motion.div> </motion.div>
) );
} }
) );
GenericFilePreview.displayName = "GenericFilePreview" GenericFilePreview.displayName = "GenericFilePreview";
@ -11,7 +11,7 @@ function Input({ className, type, ...props }: React.ComponentProps<"input">) {
"file:text-foreground placeholder:text-muted-foreground selection:bg-primary selection:text-primary-foreground dark:bg-input/30 border-input flex h-9 w-full min-w-0 rounded-md border bg-transparent px-3 py-1 text-base shadow-xs transition-[color,box-shadow] outline-none file:inline-flex file:h-7 file:border-0 file:bg-transparent file:text-sm file:font-medium disabled:pointer-events-none disabled:cursor-not-allowed disabled:opacity-50 md:text-sm", "file:text-foreground placeholder:text-muted-foreground selection:bg-primary selection:text-primary-foreground dark:bg-input/30 border-input flex h-9 w-full min-w-0 rounded-md border bg-transparent px-3 py-1 text-base shadow-xs transition-[color,box-shadow] outline-none file:inline-flex file:h-7 file:border-0 file:bg-transparent file:text-sm file:font-medium disabled:pointer-events-none disabled:cursor-not-allowed disabled:opacity-50 md:text-sm",
"focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px]", "focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px]",
"aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive", "aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive",
className, className
)} )}
{...props} {...props}
/> />
@ -1,27 +1,27 @@
"use client" "use client";
import * as React from "react" import * as React from "react";
import * as SelectPrimitive from "@radix-ui/react-select" import * as SelectPrimitive from "@radix-ui/react-select";
import { CheckIcon, ChevronDownIcon, ChevronUpIcon } from "lucide-react" import { CheckIcon, ChevronDownIcon, ChevronUpIcon } from "lucide-react";
import { cn } from "@/lib/utils" import { cn } from "@/lib/utils";
function Select({ function Select({
...props ...props
}: React.ComponentProps<typeof SelectPrimitive.Root>) { }: React.ComponentProps<typeof SelectPrimitive.Root>) {
return <SelectPrimitive.Root data-slot="select" {...props} /> return <SelectPrimitive.Root data-slot="select" {...props} />;
} }
function SelectGroup({ function SelectGroup({
...props ...props
}: React.ComponentProps<typeof SelectPrimitive.Group>) { }: React.ComponentProps<typeof SelectPrimitive.Group>) {
return <SelectPrimitive.Group data-slot="select-group" {...props} /> return <SelectPrimitive.Group data-slot="select-group" {...props} />;
} }
function SelectValue({ function SelectValue({
...props ...props
}: React.ComponentProps<typeof SelectPrimitive.Value>) { }: React.ComponentProps<typeof SelectPrimitive.Value>) {
return <SelectPrimitive.Value data-slot="select-value" {...props} /> return <SelectPrimitive.Value data-slot="select-value" {...props} />;
} }
function SelectTrigger({ function SelectTrigger({
@ -30,7 +30,7 @@ function SelectTrigger({
children, children,
...props ...props
}: React.ComponentProps<typeof SelectPrimitive.Trigger> & { }: React.ComponentProps<typeof SelectPrimitive.Trigger> & {
size?: "sm" | "default" size?: "sm" | "default";
}) { }) {
return ( return (
<SelectPrimitive.Trigger <SelectPrimitive.Trigger
@ -47,7 +47,7 @@ function SelectTrigger({
<ChevronDownIcon className="size-4 opacity-50" /> <ChevronDownIcon className="size-4 opacity-50" />
</SelectPrimitive.Icon> </SelectPrimitive.Icon>
</SelectPrimitive.Trigger> </SelectPrimitive.Trigger>
) );
} }
function SelectContent({ function SelectContent({
@ -82,7 +82,7 @@ function SelectContent({
<SelectScrollDownButton /> <SelectScrollDownButton />
</SelectPrimitive.Content> </SelectPrimitive.Content>
</SelectPrimitive.Portal> </SelectPrimitive.Portal>
) );
} }
function SelectLabel({ function SelectLabel({
@ -95,7 +95,7 @@ function SelectLabel({
className={cn("text-muted-foreground px-2 py-1.5 text-xs", className)} className={cn("text-muted-foreground px-2 py-1.5 text-xs", className)}
{...props} {...props}
/> />
) );
} }
function SelectItem({ function SelectItem({
@ -119,7 +119,7 @@ function SelectItem({
</span> </span>
<SelectPrimitive.ItemText>{children}</SelectPrimitive.ItemText> <SelectPrimitive.ItemText>{children}</SelectPrimitive.ItemText>
</SelectPrimitive.Item> </SelectPrimitive.Item>
) );
} }
function SelectSeparator({ function SelectSeparator({
@ -132,7 +132,7 @@ function SelectSeparator({
className={cn("bg-border pointer-events-none -mx-1 my-1 h-px", className)} className={cn("bg-border pointer-events-none -mx-1 my-1 h-px", className)}
{...props} {...props}
/> />
) );
} }
function SelectScrollUpButton({ function SelectScrollUpButton({
@ -150,7 +150,7 @@ function SelectScrollUpButton({
> >
<ChevronUpIcon className="size-4" /> <ChevronUpIcon className="size-4" />
</SelectPrimitive.ScrollUpButton> </SelectPrimitive.ScrollUpButton>
) );
} }
function SelectScrollDownButton({ function SelectScrollDownButton({
@ -168,7 +168,7 @@ function SelectScrollDownButton({
> >
<ChevronDownIcon className="size-4" /> <ChevronDownIcon className="size-4" />
</SelectPrimitive.ScrollDownButton> </SelectPrimitive.ScrollDownButton>
) );
} }
export { export {
@ -182,4 +182,4 @@ export {
SelectSeparator, SelectSeparator,
SelectTrigger, SelectTrigger,
SelectValue, SelectValue,
} };
@ -18,7 +18,7 @@ function Separator({
orientation={orientation} orientation={orientation}
className={cn( className={cn(
"bg-border shrink-0 data-[orientation=horizontal]:h-px data-[orientation=horizontal]:w-full data-[orientation=vertical]:h-full data-[orientation=vertical]:w-px", "bg-border shrink-0 data-[orientation=horizontal]:h-px data-[orientation=horizontal]:w-full data-[orientation=vertical]:h-full data-[orientation=vertical]:w-px",
className, className
)} )}
{...props} {...props}
/> />
@ -37,7 +37,7 @@ function SheetOverlay({
data-slot="sheet-overlay" data-slot="sheet-overlay"
className={cn( className={cn(
"data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 fixed inset-0 z-50 bg-black/50", "data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 fixed inset-0 z-50 bg-black/50",
className, className
)} )}
{...props} {...props}
/> />
@ -67,7 +67,7 @@ function SheetContent({
"data-[state=closed]:slide-out-to-top data-[state=open]:slide-in-from-top inset-x-0 top-0 h-auto border-b", "data-[state=closed]:slide-out-to-top data-[state=open]:slide-in-from-top inset-x-0 top-0 h-auto border-b",
side === "bottom" && side === "bottom" &&
"data-[state=closed]:slide-out-to-bottom data-[state=open]:slide-in-from-bottom inset-x-0 bottom-0 h-auto border-t", "data-[state=closed]:slide-out-to-bottom data-[state=open]:slide-in-from-bottom inset-x-0 bottom-0 h-auto border-t",
className, className
)} )}
{...props} {...props}
> >
@ -85,12 +85,12 @@ function SidebarProvider({
// This sets the cookie to keep the sidebar state. // This sets the cookie to keep the sidebar state.
document.cookie = `${SIDEBAR_COOKIE_NAME}=${openState}; path=/; max-age=${SIDEBAR_COOKIE_MAX_AGE}`; document.cookie = `${SIDEBAR_COOKIE_NAME}=${openState}; path=/; max-age=${SIDEBAR_COOKIE_MAX_AGE}`;
}, },
[setOpenProp, open], [setOpenProp, open]
); );
// Helper to toggle the sidebar. // Helper to toggle the sidebar.
const toggleSidebar = React.useCallback(() => { const toggleSidebar = React.useCallback(() => {
return isMobile ? setOpenMobile((open) => !open) : setOpen((open) => !open); return isMobile ? setOpenMobile(open => !open) : setOpen(open => !open);
}, [isMobile, setOpen, setOpenMobile]); }, [isMobile, setOpen, setOpenMobile]);
// Adds a keyboard shortcut to toggle the sidebar. // Adds a keyboard shortcut to toggle the sidebar.
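
`toggleSidebar` flips state with functional updaters (`setOpen(open => !open)`), so the memoized callback always sees the latest value and does not need the state itself in its dependency list. A minimal sketch of that React pattern:

```ts
import { useCallback, useState } from "react";

// The updater form receives the latest state, so `on` can stay out of the
// dependency array without the callback going stale.
function useToggle(initial = false) {
  const [on, setOn] = useState(initial);
  const toggle = useCallback(() => setOn(prev => !prev), []);
  return [on, toggle] as const;
}
```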
@ -123,7 +123,7 @@ function SidebarProvider({
setOpenMobile, setOpenMobile,
toggleSidebar, toggleSidebar,
}), }),
[state, open, setOpen, isMobile, openMobile, setOpenMobile, toggleSidebar], [state, open, setOpen, isMobile, openMobile, setOpenMobile, toggleSidebar]
); );
return ( return (
@ -140,7 +140,7 @@ function SidebarProvider({
} }
className={cn( className={cn(
"group/sidebar-wrapper has-data-[variant=inset]:bg-sidebar flex min-h-svh w-full", "group/sidebar-wrapper has-data-[variant=inset]:bg-sidebar flex min-h-svh w-full",
className, className
)} )}
{...props} {...props}
> >
@ -171,7 +171,7 @@ function Sidebar({
data-slot="sidebar" data-slot="sidebar"
className={cn( className={cn(
"bg-sidebar text-sidebar-foreground flex h-full w-(--sidebar-width) flex-col", "bg-sidebar text-sidebar-foreground flex h-full w-(--sidebar-width) flex-col",
className, className
)} )}
{...props} {...props}
> >
@ -223,7 +223,7 @@ function Sidebar({
"group-data-[side=right]:rotate-180", "group-data-[side=right]:rotate-180",
variant === "floating" || variant === "inset" variant === "floating" || variant === "inset"
? "group-data-[collapsible=icon]:w-[calc(var(--sidebar-width-icon)+(--spacing(4)))]" ? "group-data-[collapsible=icon]:w-[calc(var(--sidebar-width-icon)+(--spacing(4)))]"
: "group-data-[collapsible=icon]:w-(--sidebar-width-icon)", : "group-data-[collapsible=icon]:w-(--sidebar-width-icon)"
)} )}
/> />
<div <div
@ -237,7 +237,7 @@ function Sidebar({
variant === "floating" || variant === "inset" variant === "floating" || variant === "inset"
? "p-2 group-data-[collapsible=icon]:w-[calc(var(--sidebar-width-icon)+(--spacing(4))+2px)]" ? "p-2 group-data-[collapsible=icon]:w-[calc(var(--sidebar-width-icon)+(--spacing(4))+2px)]"
: "group-data-[collapsible=icon]:w-(--sidebar-width-icon) group-data-[side=left]:border-r group-data-[side=right]:border-l", : "group-data-[collapsible=icon]:w-(--sidebar-width-icon) group-data-[side=left]:border-r group-data-[side=right]:border-l",
className, className
)} )}
{...props} {...props}
> >
@ -267,7 +267,7 @@ function SidebarTrigger({
variant="ghost" variant="ghost"
size="icon" size="icon"
className={cn("size-7", className)} className={cn("size-7", className)}
onClick={(event) => { onClick={event => {
onClick?.(event); onClick?.(event);
toggleSidebar(); toggleSidebar();
}} }}
@ -297,7 +297,7 @@ function SidebarRail({ className, ...props }: React.ComponentProps<"button">) {
"hover:group-data-[collapsible=offcanvas]:bg-sidebar group-data-[collapsible=offcanvas]:translate-x-0 group-data-[collapsible=offcanvas]:after:left-full", "hover:group-data-[collapsible=offcanvas]:bg-sidebar group-data-[collapsible=offcanvas]:translate-x-0 group-data-[collapsible=offcanvas]:after:left-full",
"[[data-side=left][data-collapsible=offcanvas]_&]:-right-2", "[[data-side=left][data-collapsible=offcanvas]_&]:-right-2",
"[[data-side=right][data-collapsible=offcanvas]_&]:-left-2", "[[data-side=right][data-collapsible=offcanvas]_&]:-left-2",
className, className
)} )}
{...props} {...props}
/> />
@ -311,7 +311,7 @@ function SidebarInset({ className, ...props }: React.ComponentProps<"main">) {
className={cn( className={cn(
"bg-background relative flex w-full flex-1 flex-col", "bg-background relative flex w-full flex-1 flex-col",
"md:peer-data-[variant=inset]:m-2 md:peer-data-[variant=inset]:ml-0 md:peer-data-[variant=inset]:rounded-xl md:peer-data-[variant=inset]:shadow-sm md:peer-data-[variant=inset]:peer-data-[state=collapsed]:ml-2", "md:peer-data-[variant=inset]:m-2 md:peer-data-[variant=inset]:ml-0 md:peer-data-[variant=inset]:rounded-xl md:peer-data-[variant=inset]:shadow-sm md:peer-data-[variant=inset]:peer-data-[state=collapsed]:ml-2",
className, className
)} )}
{...props} {...props}
/> />
@ -375,7 +375,7 @@ function SidebarContent({ className, ...props }: React.ComponentProps<"div">) {
data-sidebar="content" data-sidebar="content"
className={cn( className={cn(
"flex min-h-0 flex-1 flex-col gap-2 overflow-auto group-data-[collapsible=icon]:overflow-hidden", "flex min-h-0 flex-1 flex-col gap-2 overflow-auto group-data-[collapsible=icon]:overflow-hidden",
className, className
)} )}
{...props} {...props}
/> />
@ -407,7 +407,7 @@ function SidebarGroupLabel({
className={cn( className={cn(
"text-sidebar-foreground/70 ring-sidebar-ring flex h-8 shrink-0 items-center rounded-md px-2 text-xs font-medium outline-hidden transition-[margin,opacity] duration-200 ease-linear focus-visible:ring-2 [&>svg]:size-4 [&>svg]:shrink-0", "text-sidebar-foreground/70 ring-sidebar-ring flex h-8 shrink-0 items-center rounded-md px-2 text-xs font-medium outline-hidden transition-[margin,opacity] duration-200 ease-linear focus-visible:ring-2 [&>svg]:size-4 [&>svg]:shrink-0",
"group-data-[collapsible=icon]:-mt-8 group-data-[collapsible=icon]:opacity-0", "group-data-[collapsible=icon]:-mt-8 group-data-[collapsible=icon]:opacity-0",
className, className
)} )}
{...props} {...props}
/> />
@ -430,7 +430,7 @@ function SidebarGroupAction({
// Increases the hit area of the button on mobile. // Increases the hit area of the button on mobile.
"after:absolute after:-inset-2 md:after:hidden", "after:absolute after:-inset-2 md:after:hidden",
"group-data-[collapsible=icon]:hidden", "group-data-[collapsible=icon]:hidden",
className, className
)} )}
{...props} {...props}
/> />
@ -492,7 +492,7 @@ const sidebarMenuButtonVariants = cva(
variant: "default", variant: "default",
size: "default", size: "default",
}, },
}, }
); );
function SidebarMenuButton({ function SidebarMenuButton({
@ -570,7 +570,7 @@ function SidebarMenuAction({
"group-data-[collapsible=icon]:hidden", "group-data-[collapsible=icon]:hidden",
showOnHover && showOnHover &&
"peer-data-[active=true]/menu-button:text-sidebar-accent-foreground group-focus-within/menu-item:opacity-100 group-hover/menu-item:opacity-100 data-[state=open]:opacity-100 md:opacity-0", "peer-data-[active=true]/menu-button:text-sidebar-accent-foreground group-focus-within/menu-item:opacity-100 group-hover/menu-item:opacity-100 data-[state=open]:opacity-100 md:opacity-0",
className, className
)} )}
{...props} {...props}
/> />
@ -592,7 +592,7 @@ function SidebarMenuBadge({
"peer-data-[size=default]/menu-button:top-1.5", "peer-data-[size=default]/menu-button:top-1.5",
"peer-data-[size=lg]/menu-button:top-2.5", "peer-data-[size=lg]/menu-button:top-2.5",
"group-data-[collapsible=icon]:hidden", "group-data-[collapsible=icon]:hidden",
className, className
)} )}
{...props} {...props}
/> />
@ -645,7 +645,7 @@ function SidebarMenuSub({ className, ...props }: React.ComponentProps<"ul">) {
className={cn( className={cn(
"border-sidebar-border mx-3.5 flex min-w-0 translate-x-px flex-col gap-1 border-l px-2.5 py-0.5", "border-sidebar-border mx-3.5 flex min-w-0 translate-x-px flex-col gap-1 border-l px-2.5 py-0.5",
"group-data-[collapsible=icon]:hidden", "group-data-[collapsible=icon]:hidden",
className, className
)} )}
{...props} {...props}
/> />
@ -691,7 +691,7 @@ function SidebarMenuSubButton({
size === "sm" && "text-xs", size === "sm" && "text-xs",
size === "md" && "text-sm", size === "md" && "text-sm",
"group-data-[collapsible=icon]:hidden", "group-data-[collapsible=icon]:hidden",
className, className
)} )}
{...props} {...props}
/> />


@ -1,10 +1,10 @@
"use client" "use client";
import { useTheme } from "next-themes" import { useTheme } from "next-themes";
import { Toaster as Sonner, ToasterProps } from "sonner" import { Toaster as Sonner, ToasterProps } from "sonner";
const Toaster = ({ ...props }: ToasterProps) => { const Toaster = ({ ...props }: ToasterProps) => {
const { theme = "system" } = useTheme() const { theme = "system" } = useTheme();
return ( return (
<Sonner <Sonner
@ -19,7 +19,7 @@ const Toaster = ({ ...props }: ToasterProps) => {
} }
{...props} {...props}
/> />
) );
} };
export { Toaster } export { Toaster };


@ -45,7 +45,7 @@ function TableFooter({ className, ...props }: React.ComponentProps<"tfoot">) {
data-slot="table-footer" data-slot="table-footer"
className={cn( className={cn(
"bg-muted/50 border-t font-medium [&>tr]:last:border-b-0", "bg-muted/50 border-t font-medium [&>tr]:last:border-b-0",
className, className
)} )}
{...props} {...props}
/> />
@ -58,7 +58,7 @@ function TableRow({ className, ...props }: React.ComponentProps<"tr">) {
data-slot="table-row" data-slot="table-row"
className={cn( className={cn(
"hover:bg-muted/50 data-[state=selected]:bg-muted border-b transition-colors", "hover:bg-muted/50 data-[state=selected]:bg-muted border-b transition-colors",
className, className
)} )}
{...props} {...props}
/> />
@ -71,7 +71,7 @@ function TableHead({ className, ...props }: React.ComponentProps<"th">) {
data-slot="table-head" data-slot="table-head"
className={cn( className={cn(
"text-foreground h-10 px-2 text-left align-middle font-medium whitespace-nowrap [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]", "text-foreground h-10 px-2 text-left align-middle font-medium whitespace-nowrap [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
className, className
)} )}
{...props} {...props}
/> />
@ -84,7 +84,7 @@ function TableCell({ className, ...props }: React.ComponentProps<"td">) {
data-slot="table-cell" data-slot="table-cell"
className={cn( className={cn(
"p-2 align-middle whitespace-nowrap [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]", "p-2 align-middle whitespace-nowrap [&:has([role=checkbox])]:pr-0 [&>[role=checkbox]]:translate-y-[2px]",
className, className
)} )}
{...props} {...props}
/> />


@ -47,7 +47,7 @@ function TooltipContent({
sideOffset={sideOffset} sideOffset={sideOffset}
className={cn( className={cn(
"bg-primary text-primary-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 w-fit origin-(--radix-tooltip-content-transform-origin) rounded-md px-3 py-1.5 text-xs text-balance", "bg-primary text-primary-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 w-fit origin-(--radix-tooltip-content-transform-origin) rounded-md px-3 py-1.5 text-xs text-balance",
className, className
)} )}
{...props} {...props}
> >


@ -85,7 +85,7 @@ export function VectorStoreDetailView({
</TableRow> </TableRow>
</TableHeader> </TableHeader>
<TableBody> <TableBody>
{files.map((file) => ( {files.map(file => (
<TableRow key={file.id}> <TableRow key={file.id}>
<TableCell> <TableCell>
<Button <Button


@ -45,7 +45,7 @@ test.describe("LogsTable Scroll and Progressive Loading", () => {
const scrollContainer = page.locator("div.overflow-auto").first(); const scrollContainer = page.locator("div.overflow-auto").first();
// Scroll to near the bottom // Scroll to near the bottom
await scrollContainer.evaluate((element) => { await scrollContainer.evaluate(element => {
element.scrollTop = element.scrollHeight - element.clientHeight - 100; element.scrollTop = element.scrollHeight - element.clientHeight - 100;
}); });
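The formatting-only hunks above (arrow parameters losing their parentheses, trailing commas dropped from call arguments, semicolons enforced) are consistent with a Prettier setup roughly like this sketch; the repository's actual options are not shown in this diff, so every value here is an inference:

// prettier.config.js, assumed: options inferred from the hunks above
module.exports = {
  semi: true, // statements gain terminating semicolons
  trailingComma: "es5", // commas kept in arrays/objects, dropped from call arguments
  arrowParens: "avoid", // (event) => ... becomes event => ...
};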


@ -10,7 +10,13 @@ const compat = new FlatCompat({
}); });
const eslintConfig = [ const eslintConfig = [
...compat.extends("next/core-web-vitals", "next/typescript"), ...compat.extends("next/core-web-vitals", "next/typescript", "prettier"),
...compat.plugins("prettier"),
{
rules: {
"prettier/prettier": "error",
},
},
]; ];
export default eslintConfig; export default eslintConfig;
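Assembled, the flat config above ends up shaped roughly as follows; the FlatCompat scaffolding mirrors the diff, and anything elided there is left as a comment rather than guessed:

// eslint.config.mjs, sketch of the final shape after this change
import { FlatCompat } from "@eslint/eslintrc";

const compat = new FlatCompat(/* baseDirectory etc. as configured in the file */);

const eslintConfig = [
  // "prettier" goes last so it disables stylistic rules that would fight the formatter
  ...compat.extends("next/core-web-vitals", "next/typescript", "prettier"),
  ...compat.plugins("prettier"),
  { rules: { "prettier/prettier": "error" } }, // surface formatting drift as lint errors
];

export default eslintConfig;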


@ -1,85 +1,85 @@
import { useEffect, useRef, useState } from "react" import { useEffect, useRef, useState } from "react";
import { recordAudio } from "@/lib/audio-utils" import { recordAudio } from "@/lib/audio-utils";
interface UseAudioRecordingOptions { interface UseAudioRecordingOptions {
transcribeAudio?: (blob: Blob) => Promise<string> transcribeAudio?: (blob: Blob) => Promise<string>;
onTranscriptionComplete?: (text: string) => void onTranscriptionComplete?: (text: string) => void;
} }
export function useAudioRecording({ export function useAudioRecording({
transcribeAudio, transcribeAudio,
onTranscriptionComplete, onTranscriptionComplete,
}: UseAudioRecordingOptions) { }: UseAudioRecordingOptions) {
const [isListening, setIsListening] = useState(false) const [isListening, setIsListening] = useState(false);
const [isSpeechSupported, setIsSpeechSupported] = useState(!!transcribeAudio) const [isSpeechSupported, setIsSpeechSupported] = useState(!!transcribeAudio);
const [isRecording, setIsRecording] = useState(false) const [isRecording, setIsRecording] = useState(false);
const [isTranscribing, setIsTranscribing] = useState(false) const [isTranscribing, setIsTranscribing] = useState(false);
const [audioStream, setAudioStream] = useState<MediaStream | null>(null) const [audioStream, setAudioStream] = useState<MediaStream | null>(null);
const activeRecordingRef = useRef<any>(null) const activeRecordingRef = useRef<any>(null);
useEffect(() => { useEffect(() => {
const checkSpeechSupport = async () => { const checkSpeechSupport = async () => {
const hasMediaDevices = !!( const hasMediaDevices = !!(
navigator.mediaDevices && navigator.mediaDevices.getUserMedia navigator.mediaDevices && navigator.mediaDevices.getUserMedia
) );
setIsSpeechSupported(hasMediaDevices && !!transcribeAudio) setIsSpeechSupported(hasMediaDevices && !!transcribeAudio);
} };
checkSpeechSupport() checkSpeechSupport();
}, [transcribeAudio]) }, [transcribeAudio]);
const stopRecording = async () => { const stopRecording = async () => {
setIsRecording(false) setIsRecording(false);
setIsTranscribing(true) setIsTranscribing(true);
try { try {
// First stop the recording to get the final blob // First stop the recording to get the final blob
recordAudio.stop() recordAudio.stop();
// Wait for the recording promise to resolve with the final blob // Wait for the recording promise to resolve with the final blob
const recording = await activeRecordingRef.current const recording = await activeRecordingRef.current;
if (transcribeAudio) { if (transcribeAudio) {
const text = await transcribeAudio(recording) const text = await transcribeAudio(recording);
onTranscriptionComplete?.(text) onTranscriptionComplete?.(text);
} }
} catch (error) { } catch (error) {
console.error("Error transcribing audio:", error) console.error("Error transcribing audio:", error);
} finally { } finally {
setIsTranscribing(false) setIsTranscribing(false);
setIsListening(false) setIsListening(false);
if (audioStream) { if (audioStream) {
audioStream.getTracks().forEach((track) => track.stop()) audioStream.getTracks().forEach(track => track.stop());
setAudioStream(null) setAudioStream(null);
}
activeRecordingRef.current = null
} }
activeRecordingRef.current = null;
} }
};
const toggleListening = async () => { const toggleListening = async () => {
if (!isListening) { if (!isListening) {
try { try {
setIsListening(true) setIsListening(true);
setIsRecording(true) setIsRecording(true);
// Get audio stream first // Get audio stream first
const stream = await navigator.mediaDevices.getUserMedia({ const stream = await navigator.mediaDevices.getUserMedia({
audio: true, audio: true,
}) });
setAudioStream(stream) setAudioStream(stream);
// Start recording with the stream // Start recording with the stream
activeRecordingRef.current = recordAudio(stream) activeRecordingRef.current = recordAudio(stream);
} catch (error) { } catch (error) {
console.error("Error recording audio:", error) console.error("Error recording audio:", error);
setIsListening(false) setIsListening(false);
setIsRecording(false) setIsRecording(false);
if (audioStream) { if (audioStream) {
audioStream.getTracks().forEach((track) => track.stop()) audioStream.getTracks().forEach(track => track.stop());
setAudioStream(null) setAudioStream(null);
} }
} }
} else { } else {
await stopRecording() await stopRecording();
}
} }
};
return { return {
isListening, isListening,
@ -89,5 +89,5 @@ export function useAudioRecording({
audioStream, audioStream,
toggleListening, toggleListening,
stopRecording, stopRecording,
} };
} }
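A minimal consumer of this hook might look like the sketch below; the component name, import path, and markup are assumptions, and only the hook's options and return values come from the file above:

// RecordButton.tsx, hypothetical consumer of useAudioRecording
import { useAudioRecording } from "@/hooks/use-audio-recording";

export function RecordButton(props: {
  transcribeAudio: (blob: Blob) => Promise<string>;
}) {
  const { isListening, toggleListening } = useAudioRecording({
    transcribeAudio: props.transcribeAudio,
    onTranscriptionComplete: text => console.log("transcript:", text),
  });

  return (
    <button onClick={toggleListening}>{isListening ? "Stop" : "Record"}</button>
  );
}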


@ -1,67 +1,67 @@
import { useEffect, useRef, useState } from "react" import { useEffect, useRef, useState } from "react";
// How many pixels from the bottom of the container to enable auto-scroll // How many pixels from the bottom of the container to enable auto-scroll
const ACTIVATION_THRESHOLD = 50 const ACTIVATION_THRESHOLD = 50;
// Minimum pixels of scroll-up movement required to disable auto-scroll // Minimum pixels of scroll-up movement required to disable auto-scroll
const MIN_SCROLL_UP_THRESHOLD = 10 const MIN_SCROLL_UP_THRESHOLD = 10;
export function useAutoScroll(dependencies: React.DependencyList) { export function useAutoScroll(dependencies: React.DependencyList) {
const containerRef = useRef<HTMLDivElement | null>(null) const containerRef = useRef<HTMLDivElement | null>(null);
const previousScrollTop = useRef<number | null>(null) const previousScrollTop = useRef<number | null>(null);
const [shouldAutoScroll, setShouldAutoScroll] = useState(true) const [shouldAutoScroll, setShouldAutoScroll] = useState(true);
const scrollToBottom = () => { const scrollToBottom = () => {
if (containerRef.current) { if (containerRef.current) {
containerRef.current.scrollTop = containerRef.current.scrollHeight containerRef.current.scrollTop = containerRef.current.scrollHeight;
}
} }
};
const handleScroll = () => { const handleScroll = () => {
if (containerRef.current) { if (containerRef.current) {
const { scrollTop, scrollHeight, clientHeight } = containerRef.current const { scrollTop, scrollHeight, clientHeight } = containerRef.current;
const distanceFromBottom = Math.abs( const distanceFromBottom = Math.abs(
scrollHeight - scrollTop - clientHeight scrollHeight - scrollTop - clientHeight
) );
const isScrollingUp = previousScrollTop.current const isScrollingUp = previousScrollTop.current
? scrollTop < previousScrollTop.current ? scrollTop < previousScrollTop.current
: false : false;
const scrollUpDistance = previousScrollTop.current const scrollUpDistance = previousScrollTop.current
? previousScrollTop.current - scrollTop ? previousScrollTop.current - scrollTop
: 0 : 0;
const isDeliberateScrollUp = const isDeliberateScrollUp =
isScrollingUp && scrollUpDistance > MIN_SCROLL_UP_THRESHOLD isScrollingUp && scrollUpDistance > MIN_SCROLL_UP_THRESHOLD;
if (isDeliberateScrollUp) { if (isDeliberateScrollUp) {
setShouldAutoScroll(false) setShouldAutoScroll(false);
} else { } else {
const isScrolledToBottom = distanceFromBottom < ACTIVATION_THRESHOLD const isScrolledToBottom = distanceFromBottom < ACTIVATION_THRESHOLD;
setShouldAutoScroll(isScrolledToBottom) setShouldAutoScroll(isScrolledToBottom);
} }
previousScrollTop.current = scrollTop previousScrollTop.current = scrollTop;
}
} }
};
const handleTouchStart = () => { const handleTouchStart = () => {
setShouldAutoScroll(false) setShouldAutoScroll(false);
} };
useEffect(() => { useEffect(() => {
if (containerRef.current) { if (containerRef.current) {
previousScrollTop.current = containerRef.current.scrollTop previousScrollTop.current = containerRef.current.scrollTop;
} }
}, []) }, []);
useEffect(() => { useEffect(() => {
if (shouldAutoScroll) { if (shouldAutoScroll) {
scrollToBottom() scrollToBottom();
} }
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, dependencies) }, dependencies);
return { return {
containerRef, containerRef,
@ -69,5 +69,5 @@ export function useAutoScroll(dependencies: React.DependencyList) {
handleScroll, handleScroll,
shouldAutoScroll, shouldAutoScroll,
handleTouchStart, handleTouchStart,
} };
} }
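Wiring the hook into a scrollable view takes one handler per event; a sketch, with the message list, import path, and markup assumed:

// hypothetical transcript view using useAutoScroll
import { useAutoScroll } from "@/hooks/use-auto-scroll";

export function Transcript({ messages }: { messages: string[] }) {
  // re-evaluate auto-scroll whenever the message list changes
  const { containerRef, handleScroll, handleTouchStart } =
    useAutoScroll([messages]);

  return (
    <div
      ref={containerRef}
      onScroll={handleScroll}
      onTouchStart={handleTouchStart}
      style={{ overflowY: "auto", maxHeight: 400 }}
    >
      {messages.map((m, i) => (
        <p key={i}>{m}</p>
      ))}
    </div>
  );
}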


@ -1,10 +1,10 @@
import { useLayoutEffect, useRef } from "react" import { useLayoutEffect, useRef } from "react";
interface UseAutosizeTextAreaProps { interface UseAutosizeTextAreaProps {
ref: React.RefObject<HTMLTextAreaElement | null> ref: React.RefObject<HTMLTextAreaElement | null>;
maxHeight?: number maxHeight?: number;
borderWidth?: number borderWidth?: number;
dependencies: React.DependencyList dependencies: React.DependencyList;
} }
export function useAutosizeTextArea({ export function useAutosizeTextArea({
@ -13,27 +13,27 @@ export function useAutosizeTextArea({
borderWidth = 0, borderWidth = 0,
dependencies, dependencies,
}: UseAutosizeTextAreaProps) { }: UseAutosizeTextAreaProps) {
const originalHeight = useRef<number | null>(null) const originalHeight = useRef<number | null>(null);
useLayoutEffect(() => { useLayoutEffect(() => {
if (!ref.current) return if (!ref.current) return;
const currentRef = ref.current const currentRef = ref.current;
const borderAdjustment = borderWidth * 2 const borderAdjustment = borderWidth * 2;
if (originalHeight.current === null) { if (originalHeight.current === null) {
originalHeight.current = currentRef.scrollHeight - borderAdjustment originalHeight.current = currentRef.scrollHeight - borderAdjustment;
} }
currentRef.style.removeProperty("height") currentRef.style.removeProperty("height");
const scrollHeight = currentRef.scrollHeight const scrollHeight = currentRef.scrollHeight;
// Make sure we don't go over maxHeight // Make sure we don't go over maxHeight
const clampedToMax = Math.min(scrollHeight, maxHeight) const clampedToMax = Math.min(scrollHeight, maxHeight);
// Make sure we don't go less than the original height // Make sure we don't go less than the original height
const clampedToMin = Math.max(clampedToMax, originalHeight.current) const clampedToMin = Math.max(clampedToMax, originalHeight.current);
currentRef.style.height = `${clampedToMin + borderAdjustment}px` currentRef.style.height = `${clampedToMin + borderAdjustment}px`;
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, [maxHeight, ref, ...dependencies]) }, [maxHeight, ref, ...dependencies]);
} }
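A sketch of a self-sizing textarea built on this hook; the component name and import path are assumptions:

// hypothetical auto-growing textarea
import { useRef, useState } from "react";
import { useAutosizeTextArea } from "@/hooks/use-autosize-textarea";

export function GrowingTextArea() {
  const ref = useRef<HTMLTextAreaElement>(null);
  const [value, setValue] = useState("");

  // re-measure on every value change, capped at 200px tall
  useAutosizeTextArea({ ref, maxHeight: 200, dependencies: [value] });

  return (
    <textarea ref={ref} value={value} onChange={e => setValue(e.target.value)} />
  );
}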


@ -1,36 +1,36 @@
import { useCallback, useRef, useState } from "react" import { useCallback, useRef, useState } from "react";
import { toast } from "sonner" import { toast } from "sonner";
type UseCopyToClipboardProps = { type UseCopyToClipboardProps = {
text: string text: string;
copyMessage?: string copyMessage?: string;
} };
export function useCopyToClipboard({ export function useCopyToClipboard({
text, text,
copyMessage = "Copied to clipboard!", copyMessage = "Copied to clipboard!",
}: UseCopyToClipboardProps) { }: UseCopyToClipboardProps) {
const [isCopied, setIsCopied] = useState(false) const [isCopied, setIsCopied] = useState(false);
const timeoutRef = useRef<NodeJS.Timeout | null>(null) const timeoutRef = useRef<NodeJS.Timeout | null>(null);
const handleCopy = useCallback(() => { const handleCopy = useCallback(() => {
navigator.clipboard navigator.clipboard
.writeText(text) .writeText(text)
.then(() => { .then(() => {
toast.success(copyMessage) toast.success(copyMessage);
setIsCopied(true) setIsCopied(true);
if (timeoutRef.current) { if (timeoutRef.current) {
clearTimeout(timeoutRef.current) clearTimeout(timeoutRef.current);
timeoutRef.current = null timeoutRef.current = null;
} }
timeoutRef.current = setTimeout(() => { timeoutRef.current = setTimeout(() => {
setIsCopied(false) setIsCopied(false);
}, 2000) }, 2000);
}) })
.catch(() => { .catch(() => {
toast.error("Failed to copy to clipboard.") toast.error("Failed to copy to clipboard.");
}) });
}, [text, copyMessage]) }, [text, copyMessage]);
return { isCopied, handleCopy } return { isCopied, handleCopy };
} }
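Typical usage, sketched; the button markup and import path are assumptions:

// hypothetical copy button
import { useCopyToClipboard } from "@/hooks/use-copy-to-clipboard";

export function CopyButton({ text }: { text: string }) {
  const { isCopied, handleCopy } = useCopyToClipboard({
    text,
    copyMessage: "Copied!",
  });

  return <button onClick={handleCopy}>{isCopied ? "Copied" : "Copy"}</button>;
}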


@ -20,7 +20,7 @@ interface UseInfiniteScrollOptions {
*/ */
export function useInfiniteScroll( export function useInfiniteScroll(
onLoadMore: (() => void) | undefined, onLoadMore: (() => void) | undefined,
options: UseInfiniteScrollOptions = {}, options: UseInfiniteScrollOptions = {}
) { ) {
const { enabled = true, threshold = 0.1, rootMargin = "100px" } = options; const { enabled = true, threshold = 0.1, rootMargin = "100px" } = options;
const sentinelRef = useRef<HTMLTableRowElement>(null); const sentinelRef = useRef<HTMLTableRowElement>(null);
@ -29,7 +29,7 @@ export function useInfiniteScroll(
if (!onLoadMore || !enabled) return; if (!onLoadMore || !enabled) return;
const observer = new IntersectionObserver( const observer = new IntersectionObserver(
(entries) => { entries => {
const [entry] = entries; const [entry] = entries;
if (entry.isIntersecting) { if (entry.isIntersecting) {
onLoadMore(); onLoadMore();
@ -38,7 +38,7 @@ export function useInfiniteScroll(
{ {
threshold, threshold,
rootMargin, rootMargin,
}, }
); );
const sentinel = sentinelRef.current; const sentinel = sentinelRef.current;


@ -4,7 +4,7 @@ const MOBILE_BREAKPOINT = 768;
export function useIsMobile() { export function useIsMobile() {
const [isMobile, setIsMobile] = React.useState<boolean | undefined>( const [isMobile, setIsMobile] = React.useState<boolean | undefined>(
undefined, undefined
); );
React.useEffect(() => { React.useEffect(() => {


@ -38,7 +38,7 @@ interface UsePaginationParams<T> extends UsePaginationOptions {
limit: number; limit: number;
model?: string; model?: string;
order?: string; order?: string;
}, }
) => Promise<PaginationResponse<T>>; ) => Promise<PaginationResponse<T>>;
errorMessagePrefix: string; errorMessagePrefix: string;
enabled?: boolean; enabled?: boolean;
@ -81,7 +81,7 @@ export function usePagination<T>({
const fetchLimit = targetRows || limit; const fetchLimit = targetRows || limit;
try { try {
setState((prev) => ({ setState(prev => ({
...prev, ...prev,
status: isInitialLoad ? "loading" : "loading-more", status: isInitialLoad ? "loading" : "loading-more",
error: null, error: null,
@ -94,7 +94,7 @@ export function usePagination<T>({
...(order && { order }), ...(order && { order }),
}); });
setState((prev) => ({ setState(prev => ({
...prev, ...prev,
data: isInitialLoad data: isInitialLoad
? response.data ? response.data
@ -124,14 +124,14 @@ export function usePagination<T>({
? new Error(`${errorMessage} ${err.message}`) ? new Error(`${errorMessage} ${err.message}`)
: new Error(errorMessage); : new Error(errorMessage);
setState((prev) => ({ setState(prev => ({
...prev, ...prev,
error, error,
status: "error", status: "error",
})); }));
} }
}, },
[limit, model, order, fetchFunction, errorMessagePrefix, client, router], [limit, model, order, fetchFunction, errorMessagePrefix, client, router]
); );
/** /**


@ -1,50 +1,50 @@
type RecordAudioType = { type RecordAudioType = {
(stream: MediaStream): Promise<Blob> (stream: MediaStream): Promise<Blob>;
stop: () => void stop: () => void;
currentRecorder?: MediaRecorder currentRecorder?: MediaRecorder;
} };
export const recordAudio = (function (): RecordAudioType { export const recordAudio = (function (): RecordAudioType {
const func = async function recordAudio(stream: MediaStream): Promise<Blob> { const func = async function recordAudio(stream: MediaStream): Promise<Blob> {
try { try {
const mediaRecorder = new MediaRecorder(stream, { const mediaRecorder = new MediaRecorder(stream, {
mimeType: "audio/webm;codecs=opus", mimeType: "audio/webm;codecs=opus",
}) });
const audioChunks: Blob[] = [] const audioChunks: Blob[] = [];
return new Promise((resolve, reject) => { return new Promise((resolve, reject) => {
mediaRecorder.ondataavailable = (event) => { mediaRecorder.ondataavailable = event => {
if (event.data.size > 0) { if (event.data.size > 0) {
audioChunks.push(event.data) audioChunks.push(event.data);
}
} }
};
mediaRecorder.onstop = () => { mediaRecorder.onstop = () => {
const audioBlob = new Blob(audioChunks, { type: "audio/webm" }) const audioBlob = new Blob(audioChunks, { type: "audio/webm" });
resolve(audioBlob) resolve(audioBlob);
} };
mediaRecorder.onerror = () => { mediaRecorder.onerror = () => {
reject(new Error("MediaRecorder error occurred")) reject(new Error("MediaRecorder error occurred"));
} };
mediaRecorder.start(1000) mediaRecorder.start(1000);
;(func as RecordAudioType).currentRecorder = mediaRecorder (func as RecordAudioType).currentRecorder = mediaRecorder;
}) });
} catch (error) { } catch (error) {
const errorMessage = const errorMessage =
error instanceof Error ? error.message : "Unknown error occurred" error instanceof Error ? error.message : "Unknown error occurred";
throw new Error("Failed to start recording: " + errorMessage) throw new Error("Failed to start recording: " + errorMessage);
}
} }
};
;(func as RecordAudioType).stop = () => { (func as RecordAudioType).stop = () => {
const recorder = (func as RecordAudioType).currentRecorder const recorder = (func as RecordAudioType).currentRecorder;
if (recorder && recorder.state !== "inactive") { if (recorder && recorder.state !== "inactive") {
recorder.stop() recorder.stop();
}
delete (func as RecordAudioType).currentRecorder
} }
delete (func as RecordAudioType).currentRecorder;
};
return func as RecordAudioType return func as RecordAudioType;
})() })();
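End to end, the recorder reads like this sketch; stream acquisition and the five-second stop are assumptions, while the call shape comes from the RecordAudioType above:

// hypothetical capture flow for recordAudio
import { recordAudio } from "@/lib/audio-utils";

async function captureFiveSeconds(): Promise<Blob> {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
  // kick off recording; the promise resolves with the final blob on stop
  const recording = recordAudio(stream);
  await new Promise(resolve => setTimeout(resolve, 5000));
  recordAudio.stop(); // flushes remaining chunks and triggers onstop
  const blob = await recording;
  stream.getTracks().forEach(track => track.stop());
  return blob;
}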


@ -27,19 +27,19 @@ export function validateServerConfig() {
!optionalConfigs.GITHUB_CLIENT_SECRET !optionalConfigs.GITHUB_CLIENT_SECRET
) { ) {
console.log( console.log(
"\n📝 GitHub OAuth not configured (authentication features disabled)", "\n📝 GitHub OAuth not configured (authentication features disabled)"
); );
console.log(" To enable GitHub OAuth:"); console.log(" To enable GitHub OAuth:");
console.log(" 1. Go to https://github.com/settings/applications/new"); console.log(" 1. Go to https://github.com/settings/applications/new");
console.log( console.log(
" 2. Set Application name: Llama Stack UI (or your preferred name)", " 2. Set Application name: Llama Stack UI (or your preferred name)"
); );
console.log(" 3. Set Homepage URL: http://localhost:8322"); console.log(" 3. Set Homepage URL: http://localhost:8322");
console.log( console.log(
" 4. Set Authorization callback URL: http://localhost:8322/api/auth/callback/github", " 4. Set Authorization callback URL: http://localhost:8322/api/auth/callback/github"
); );
console.log( console.log(
" 5. Create the app and copy the Client ID and Client Secret", " 5. Create the app and copy the Client ID and Client Secret"
); );
console.log(" 6. Add them to your .env.local file:"); console.log(" 6. Add them to your .env.local file:");
console.log(" GITHUB_CLIENT_ID=your_client_id"); console.log(" GITHUB_CLIENT_ID=your_client_id");


@ -11,7 +11,7 @@ export interface VectorStoreContentItem {
vector_store_id: string; vector_store_id: string;
file_id: string; file_id: string;
content: VectorStoreContent; content: VectorStoreContent;
metadata: Record<string, any>; metadata: Record<string, unknown>;
embedding?: number[]; embedding?: number[];
} }
@ -32,11 +32,18 @@ export interface VectorStoreListContentsResponse {
export class ContentsAPI { export class ContentsAPI {
constructor(private client: LlamaStackClient) {} constructor(private client: LlamaStackClient) {}
async getFileContents(vectorStoreId: string, fileId: string): Promise<VectorStoreContentsResponse> { async getFileContents(
vectorStoreId: string,
fileId: string
): Promise<VectorStoreContentsResponse> {
return this.client.vectorStores.files.content(vectorStoreId, fileId); return this.client.vectorStores.files.content(vectorStoreId, fileId);
} }
async getContent(vectorStoreId: string, fileId: string, contentId: string): Promise<VectorStoreContentItem> { async getContent(
vectorStoreId: string,
fileId: string,
contentId: string
): Promise<VectorStoreContentItem> {
const contentsResponse = await this.listContents(vectorStoreId, fileId); const contentsResponse = await this.listContents(vectorStoreId, fileId);
const targetContent = contentsResponse.data.find(c => c.id === contentId); const targetContent = contentsResponse.data.find(c => c.id === contentId);
@ -47,16 +54,11 @@ export class ContentsAPI {
return targetContent; return targetContent;
} }
async updateContent( async updateContent(): Promise<VectorStoreContentItem> {
vectorStoreId: string,
fileId: string,
contentId: string,
updates: { content?: string; metadata?: Record<string, any> }
): Promise<VectorStoreContentItem> {
throw new Error("Individual content updates not yet implemented in API"); throw new Error("Individual content updates not yet implemented in API");
} }
async deleteContent(vectorStoreId: string, fileId: string, contentId: string): Promise<VectorStoreContentDeleteResponse> { async deleteContent(): Promise<VectorStoreContentDeleteResponse> {
throw new Error("Individual content deletion not yet implemented in API"); throw new Error("Individual content deletion not yet implemented in API");
} }
@ -70,18 +72,27 @@ export class ContentsAPI {
before?: string; before?: string;
} }
): Promise<VectorStoreListContentsResponse> { ): Promise<VectorStoreListContentsResponse> {
const fileContents = await this.client.vectorStores.files.content(vectorStoreId, fileId); const fileContents = await this.client.vectorStores.files.content(
vectorStoreId,
fileId
);
const contentItems: VectorStoreContentItem[] = []; const contentItems: VectorStoreContentItem[] = [];
fileContents.content.forEach((content, contentIndex) => { fileContents.content.forEach((content, contentIndex) => {
const rawContent = content as any; const rawContent = content as Record<string, unknown>;
// Extract actual fields from the API response // Extract actual fields from the API response
const embedding = rawContent.embedding || undefined; const embedding = rawContent.embedding || undefined;
const created_timestamp = rawContent.created_timestamp || rawContent.created_at || Date.now() / 1000; const created_timestamp =
rawContent.created_timestamp ||
rawContent.created_at ||
Date.now() / 1000;
const chunkMetadata = rawContent.chunk_metadata || {}; const chunkMetadata = rawContent.chunk_metadata || {};
const contentId = rawContent.chunk_metadata?.chunk_id || rawContent.id || `content_${fileId}_${contentIndex}`; const contentId =
const objectType = rawContent.object || 'vector_store.file.content'; rawContent.chunk_metadata?.chunk_id ||
rawContent.id ||
`content_${fileId}_${contentIndex}`;
const objectType = rawContent.object || "vector_store.file.content";
contentItems.push({ contentItems.push({
id: contentId, id: contentId,
object: objectType, object: objectType,
@ -92,7 +103,7 @@ export class ContentsAPI {
embedding: embedding, embedding: embedding,
metadata: { metadata: {
...chunkMetadata, // chunk_metadata fields from API ...chunkMetadata, // chunk_metadata fields from API
content_length: content.type === 'text' ? content.text.length : 0, content_length: content.type === "text" ? content.text.length : 0,
}, },
}); });
}); });
@ -104,7 +115,7 @@ export class ContentsAPI {
} }
return { return {
object: 'list', object: "list",
data: filteredItems, data: filteredItems,
has_more: contentItems.length > (options?.limit || contentItems.length), has_more: contentItems.length > (options?.limit || contentItems.length),
}; };
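A sketch of the call path after this refactor; the client construction and import paths are assumptions, while the method names and option shape come from the class above:

// hypothetical listing of a file's chunks via ContentsAPI
import { LlamaStackClient } from "llama-stack-client";
import { ContentsAPI } from "@/lib/contents-api";

async function showFileChunks(vectorStoreId: string, fileId: string) {
  const client = new LlamaStackClient({ baseURL: "http://localhost:8321" });
  const contents = new ContentsAPI(client);

  const page = await contents.listContents(vectorStoreId, fileId, { limit: 20 });
  for (const item of page.data) {
    console.log(item.id, item.metadata.content_length);
  }
}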


@ -18,7 +18,7 @@ describe("extractTextFromContentPart", () => {
it("should extract text from an array of text content objects", () => { it("should extract text from an array of text content objects", () => {
const content = [{ type: "text", text: "Which planet do humans live on?" }]; const content = [{ type: "text", text: "Which planet do humans live on?" }];
expect(extractTextFromContentPart(content)).toBe( expect(extractTextFromContentPart(content)).toBe(
"Which planet do humans live on?", "Which planet do humans live on?"
); );
}); });
@ -37,7 +37,7 @@ describe("extractTextFromContentPart", () => {
{ type: "text", text: "It's an image." }, { type: "text", text: "It's an image." },
]; ];
expect(extractTextFromContentPart(content)).toBe( expect(extractTextFromContentPart(content)).toBe(
"Look at this: [Image] It's an image.", "Look at this: [Image] It's an image."
); );
}); });
@ -53,7 +53,7 @@ describe("extractTextFromContentPart", () => {
}); });
it("should handle arrays with plain strings", () => { it("should handle arrays with plain strings", () => {
const content = ["This is", " a test."] as any; const content = ["This is", " a test."] as unknown;
expect(extractTextFromContentPart(content)).toBe("This is a test."); expect(extractTextFromContentPart(content)).toBe("This is a test.");
}); });
@ -65,7 +65,7 @@ describe("extractTextFromContentPart", () => {
null, null,
undefined, undefined,
{ type: "text", noTextProperty: true }, { type: "text", noTextProperty: true },
] as any; ] as unknown;
expect(extractTextFromContentPart(content)).toBe("Valid"); expect(extractTextFromContentPart(content)).toBe("Valid");
}); });
@ -75,15 +75,17 @@ describe("extractTextFromContentPart", () => {
"Just a string.", "Just a string.",
{ type: "image_url", image_url: { url: "http://example.com/image.png" } }, { type: "image_url", image_url: { url: "http://example.com/image.png" } },
{ type: "text", text: "Last part." }, { type: "text", text: "Last part." },
] as any; ] as unknown;
expect(extractTextFromContentPart(content)).toBe( expect(extractTextFromContentPart(content)).toBe(
"First part. Just a string. [Image] Last part.", "First part. Just a string. [Image] Last part."
); );
}); });
}); });
describe("extractDisplayableText (composite function)", () => { describe("extractDisplayableText (composite function)", () => {
const mockFormatToolCallToString = (toolCall: any) => { const mockFormatToolCallToString = (toolCall: {
function?: { name?: string; arguments?: unknown };
}) => {
if (!toolCall || !toolCall.function || !toolCall.function.name) return ""; if (!toolCall || !toolCall.function || !toolCall.function.name) return "";
const args = toolCall.function.arguments const args = toolCall.function.arguments
? JSON.stringify(toolCall.function.arguments) ? JSON.stringify(toolCall.function.arguments)
@ -125,7 +127,7 @@ describe("extractDisplayableText (composite function)", () => {
tool_calls: [toolCall], tool_calls: [toolCall],
}; };
expect(extractDisplayableText(messageWithEffectivelyEmptyContent)).toBe( expect(extractDisplayableText(messageWithEffectivelyEmptyContent)).toBe(
mockFormatToolCallToString(toolCall), mockFormatToolCallToString(toolCall)
); );
const messageWithEmptyContent: ChatMessage = { const messageWithEmptyContent: ChatMessage = {
@ -134,7 +136,7 @@ describe("extractDisplayableText (composite function)", () => {
tool_calls: [toolCall], tool_calls: [toolCall],
}; };
expect(extractDisplayableText(messageWithEmptyContent)).toBe( expect(extractDisplayableText(messageWithEmptyContent)).toBe(
mockFormatToolCallToString(toolCall), mockFormatToolCallToString(toolCall)
); );
}); });
@ -149,7 +151,7 @@ describe("extractDisplayableText (composite function)", () => {
}; };
const expectedToolCallStr = mockFormatToolCallToString(toolCall); const expectedToolCallStr = mockFormatToolCallToString(toolCall);
expect(extractDisplayableText(message)).toBe( expect(extractDisplayableText(message)).toBe(
`The result is: ${expectedToolCallStr}`, `The result is: ${expectedToolCallStr}`
); );
}); });
@ -167,7 +169,7 @@ describe("extractDisplayableText (composite function)", () => {
}; };
const expectedToolCallStr = mockFormatToolCallToString(toolCall); const expectedToolCallStr = mockFormatToolCallToString(toolCall);
expect(extractDisplayableText(message)).toBe( expect(extractDisplayableText(message)).toBe(
`Okay, checking weather for London. ${expectedToolCallStr}`, `Okay, checking weather for London. ${expectedToolCallStr}`
); );
}); });
@ -178,7 +180,7 @@ describe("extractDisplayableText (composite function)", () => {
tool_calls: [], tool_calls: [],
}; };
expect(extractDisplayableText(messageEmptyToolCalls)).toBe( expect(extractDisplayableText(messageEmptyToolCalls)).toBe(
"No tools here.", "No tools here."
); );
const messageUndefinedToolCalls: ChatMessage = { const messageUndefinedToolCalls: ChatMessage = {
@ -187,7 +189,7 @@ describe("extractDisplayableText (composite function)", () => {
tool_calls: undefined, tool_calls: undefined,
}; };
expect(extractDisplayableText(messageUndefinedToolCalls)).toBe( expect(extractDisplayableText(messageUndefinedToolCalls)).toBe(
"Still no tools.", "Still no tools."
); );
}); });
}); });


@ -2,7 +2,7 @@ import { ChatMessage, ChatMessageContentPart } from "@/lib/types";
import { formatToolCallToString } from "@/lib/format-tool-call"; import { formatToolCallToString } from "@/lib/format-tool-call";
export function extractTextFromContentPart( export function extractTextFromContentPart(
content: string | ChatMessageContentPart[] | null | undefined, content: string | ChatMessageContentPart[] | null | undefined
): string { ): string {
if (content === null || content === undefined) { if (content === null || content === undefined) {
return ""; return "";
@ -37,7 +37,7 @@ export function extractTextFromContentPart(
} }
export function extractDisplayableText( export function extractDisplayableText(
message: ChatMessage | undefined | null, message: ChatMessage | undefined | null
): string { ): string {
if (!message) { if (!message) {
return ""; return "";


@ -5,7 +5,9 @@
* with `name` and `arguments`. * with `name` and `arguments`.
* @returns A formatted string or an empty string if data is malformed. * @returns A formatted string or an empty string if data is malformed.
*/ */
export function formatToolCallToString(toolCall: any): string { export function formatToolCallToString(toolCall: {
function?: { name?: string; arguments?: unknown };
}): string {
if ( if (
!toolCall || !toolCall ||
!toolCall.function || !toolCall.function ||
@ -24,7 +26,7 @@ export function formatToolCallToString(toolCall: any): string {
} else { } else {
try { try {
argsString = JSON.stringify(args); argsString = JSON.stringify(args);
} catch (error) { } catch {
return ""; return "";
} }
} }
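For illustration, the narrowed signature accepts shapes like the sketch below; the example values are assumptions, and the exact rendered string comes from the function body, which this hunk only partially shows:

// hypothetical call against the narrowed signature
import { formatToolCallToString } from "@/lib/format-tool-call";

const rendered = formatToolCallToString({
  function: { name: "get_weather", arguments: { location: "San Francisco, CA" } },
});
// rendered is a short string naming the tool and its JSON-serialized arguments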


@ -1,6 +1,6 @@
export function truncateText( export function truncateText(
text: string | null | undefined, text: string | null | undefined,
maxLength: number = 50, maxLength: number = 50
): string { ): string {
if (!text) return "N/A"; if (!text) return "N/A";
if (text.length <= maxLength) return text; if (text.length <= maxLength) return text;


@ -5,7 +5,6 @@
# the root directory of this source tree. # the root directory of this source tree.
import os import os
import re
from pathlib import Path from pathlib import Path
import pytest import pytest
@ -48,19 +47,6 @@ def _load_all_verification_configs():
return {"providers": all_provider_configs} return {"providers": all_provider_configs}
def case_id_generator(case):
"""Generate a test ID from the case's 'case_id' field, or use a default."""
case_id = case.get("case_id")
if isinstance(case_id, str | int):
return re.sub(r"\\W|^(?=\\d)", "_", str(case_id))
return None
# Helper to get the base test name from the request object
def get_base_test_name(request):
return request.node.originalname
# --- End Helper Functions --- # --- End Helper Functions ---


@ -1,16 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from pathlib import Path
import yaml
def load_test_cases(name: str):
fixture_dir = Path(__file__).parent / "test_cases"
yaml_path = fixture_dir / f"{name}.yaml"
with open(yaml_path) as f:
return yaml.safe_load(f)


@ -0,0 +1,262 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Any
import pytest
from pydantic import BaseModel
class ResponsesTestCase(BaseModel):
# Input can be a simple string or complex message structure
input: str | list[dict[str, Any]]
expected: str
# Tools as flexible dict structure (gets validated at runtime by the API)
tools: list[dict[str, Any]] | None = None
# Multi-turn conversations with input/output pairs
turns: list[tuple[str | list[dict[str, Any]], str]] | None = None
# File search specific fields
file_content: str | None = None
file_path: str | None = None
# Streaming flag
stream: bool | None = None
# Basic response test cases
basic_test_cases = [
pytest.param(
ResponsesTestCase(
input="Which planet do humans live on?",
expected="earth",
),
id="earth",
),
pytest.param(
ResponsesTestCase(
input="Which planet has rings around it with a name starting with letter S?",
expected="saturn",
),
id="saturn",
),
pytest.param(
ResponsesTestCase(
input=[
{
"role": "user",
"content": [
{
"type": "input_text",
"text": "what teams are playing in this image?",
}
],
},
{
"role": "user",
"content": [
{
"type": "input_image",
"image_url": "https://upload.wikimedia.org/wikipedia/commons/3/3b/LeBron_James_Layup_%28Cleveland_vs_Brooklyn_2018%29.jpg",
}
],
},
],
expected="brooklyn nets",
),
id="image_input",
),
]
# Multi-turn test cases
multi_turn_test_cases = [
pytest.param(
ResponsesTestCase(
input="", # Not used for multi-turn
expected="", # Not used for multi-turn
turns=[
("Which planet do humans live on?", "earth"),
("What is the name of the planet from your previous response?", "earth"),
],
),
id="earth",
),
]
# Web search test cases
web_search_test_cases = [
pytest.param(
ResponsesTestCase(
input="How many experts does the Llama 4 Maverick model have?",
tools=[{"type": "web_search", "search_context_size": "low"}],
expected="128",
),
id="llama_experts",
),
]
# File search test cases
file_search_test_cases = [
pytest.param(
ResponsesTestCase(
input="How many experts does the Llama 4 Maverick model have?",
tools=[{"type": "file_search"}],
expected="128",
file_content="Llama 4 Maverick has 128 experts",
),
id="llama_experts",
),
pytest.param(
ResponsesTestCase(
input="How many experts does the Llama 4 Maverick model have?",
tools=[{"type": "file_search"}],
expected="128",
file_path="pdfs/llama_stack_and_models.pdf",
),
id="llama_experts_pdf",
),
]
# MCP tool test cases
mcp_tool_test_cases = [
pytest.param(
ResponsesTestCase(
input="What is the boiling point of myawesomeliquid in Celsius?",
tools=[{"type": "mcp", "server_label": "localmcp", "server_url": "<FILLED_BY_TEST_RUNNER>"}],
expected="Hello, world!",
),
id="boiling_point_tool",
),
]
# Custom tool test cases
custom_tool_test_cases = [
pytest.param(
ResponsesTestCase(
input="What's the weather like in San Francisco?",
tools=[
{
"type": "function",
"name": "get_weather",
"description": "Get current temperature for a given location.",
"parameters": {
"additionalProperties": False,
"properties": {
"location": {
"description": "City and country e.g. Bogotá, Colombia",
"type": "string",
}
},
"required": ["location"],
"type": "object",
},
}
],
expected="", # No specific expected output for custom tools
),
id="sf_weather",
),
]
# Image test cases
image_test_cases = [
pytest.param(
ResponsesTestCase(
input=[
{
"role": "user",
"content": [
{
"type": "input_text",
"text": "Identify the type of animal in this image.",
},
{
"type": "input_image",
"image_url": "https://upload.wikimedia.org/wikipedia/commons/f/f7/Llamas%2C_Vernagt-Stausee%2C_Italy.jpg",
},
],
},
],
expected="llama",
),
id="llama_image",
),
]
# Multi-turn image test cases
multi_turn_image_test_cases = [
pytest.param(
ResponsesTestCase(
input="", # Not used for multi-turn
expected="", # Not used for multi-turn
turns=[
(
[
{
"role": "user",
"content": [
{
"type": "input_text",
"text": "What type of animal is in this image? Please respond with a single word that starts with the letter 'L'.",
},
{
"type": "input_image",
"image_url": "https://upload.wikimedia.org/wikipedia/commons/f/f7/Llamas%2C_Vernagt-Stausee%2C_Italy.jpg",
},
],
},
],
"llama",
),
(
"What country do you find this animal primarily in? What continent?",
"peru",
),
],
),
id="llama_image_understanding",
),
]
# Multi-turn tool execution test cases
multi_turn_tool_execution_test_cases = [
pytest.param(
ResponsesTestCase(
input="I need to check if user 'alice' can access the file 'document.txt'. First, get alice's user ID, then check if that user ID can access the file 'document.txt'. Do this as a series of steps, where each step is a separate message. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response.",
tools=[{"type": "mcp", "server_label": "localmcp", "server_url": "<FILLED_BY_TEST_RUNNER>"}],
expected="yes",
),
id="user_file_access_check",
),
pytest.param(
ResponsesTestCase(
input="I need to get the results for the 'boiling_point' experiment. First, get the experiment ID for 'boiling_point', then use that ID to get the experiment results. Tell me the boiling point in Celsius.",
tools=[{"type": "mcp", "server_label": "localmcp", "server_url": "<FILLED_BY_TEST_RUNNER>"}],
expected="100°C",
),
id="experiment_results_lookup",
),
]
# Multi-turn tool execution streaming test cases
multi_turn_tool_execution_streaming_test_cases = [
pytest.param(
ResponsesTestCase(
input="Help me with this security check: First, get the user ID for 'charlie', then get the permissions for that user ID, and finally check if that user can access 'secret_file.txt'. Stream your progress as you work through each step. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response.",
tools=[{"type": "mcp", "server_label": "localmcp", "server_url": "<FILLED_BY_TEST_RUNNER>"}],
expected="no",
stream=True,
),
id="user_permissions_workflow",
),
pytest.param(
ResponsesTestCase(
input="I need a complete analysis: First, get the experiment ID for 'chemical_reaction', then get the results for that experiment, and tell me if the yield was above 80%. Return only one tool call per step. Please stream your analysis process.",
tools=[{"type": "mcp", "server_label": "localmcp", "server_url": "<FILLED_BY_TEST_RUNNER>"}],
expected="85%",
stream=True,
),
id="experiment_analysis_streaming",
),
]


@ -1,397 +0,0 @@
test_chat_basic:
test_name: test_chat_basic
test_params:
case:
- case_id: "earth"
input:
messages:
- content: Which planet do humans live on?
role: user
output: Earth
- case_id: "saturn"
input:
messages:
- content: Which planet has rings around it with a name starting with letter
S?
role: user
output: Saturn
test_chat_input_validation:
test_name: test_chat_input_validation
test_params:
case:
- case_id: "messages_missing"
input:
messages: []
output:
error:
status_code: 400
- case_id: "messages_role_invalid"
input:
messages:
- content: Which planet do humans live on?
role: fake_role
output:
error:
status_code: 400
- case_id: "tool_choice_invalid"
input:
messages:
- content: Which planet do humans live on?
role: user
tool_choice: invalid
output:
error:
status_code: 400
- case_id: "tool_choice_no_tools"
input:
messages:
- content: Which planet do humans live on?
role: user
tool_choice: required
output:
error:
status_code: 400
- case_id: "tools_type_invalid"
input:
messages:
- content: Which planet do humans live on?
role: user
tools:
- type: invalid
output:
error:
status_code: 400
test_chat_image:
test_name: test_chat_image
test_params:
case:
- input:
messages:
- content:
- text: What is in this image?
type: text
- image_url:
url: https://upload.wikimedia.org/wikipedia/commons/f/f7/Llamas%2C_Vernagt-Stausee%2C_Italy.jpg
type: image_url
role: user
output: llama
test_chat_structured_output:
test_name: test_chat_structured_output
test_params:
case:
- case_id: "calendar"
input:
messages:
- content: Extract the event information.
role: system
- content: Alice and Bob are going to a science fair on Friday.
role: user
response_format:
json_schema:
name: calendar_event
schema:
properties:
date:
title: Date
type: string
name:
title: Name
type: string
participants:
items:
type: string
title: Participants
type: array
required:
- name
- date
- participants
title: CalendarEvent
type: object
type: json_schema
output: valid_calendar_event
- case_id: "math"
input:
messages:
- content: You are a helpful math tutor. Guide the user through the solution
step by step.
role: system
- content: how can I solve 8x + 7 = -23
role: user
response_format:
json_schema:
name: math_reasoning
schema:
$defs:
Step:
properties:
explanation:
title: Explanation
type: string
output:
title: Output
type: string
required:
- explanation
- output
title: Step
type: object
properties:
final_answer:
title: Final Answer
type: string
steps:
items:
$ref: '#/$defs/Step'
title: Steps
type: array
required:
- steps
- final_answer
title: MathReasoning
type: object
type: json_schema
output: valid_math_reasoning
test_tool_calling:
test_name: test_tool_calling
test_params:
case:
- input:
messages:
- content: You are a helpful assistant that can use tools to get information.
role: system
- content: What's the weather like in San Francisco?
role: user
tools:
- function:
description: Get current temperature for a given location.
name: get_weather
parameters:
additionalProperties: false
properties:
location:
description: "City and country e.g. Bogot\xE1, Colombia"
type: string
required:
- location
type: object
type: function
output: get_weather_tool_call
test_chat_multi_turn_tool_calling:
test_name: test_chat_multi_turn_tool_calling
test_params:
case:
- case_id: "text_then_weather_tool"
input:
messages:
- - role: user
content: "What's the name of the Sun in latin?"
- - role: user
content: "What's the weather like in San Francisco?"
tools:
- function:
description: Get the current weather
name: get_weather
parameters:
type: object
properties:
location:
description: "The city and state (both required), e.g. San Francisco, CA."
type: string
required: ["location"]
type: function
tool_responses:
- response: "{'response': '70 degrees and foggy'}"
expected:
- num_tool_calls: 0
answer: ["sol"]
- num_tool_calls: 1
tool_name: get_weather
tool_arguments:
location: "San Francisco, CA"
- num_tool_calls: 0
answer: ["foggy", "70 degrees"]
- case_id: "weather_tool_then_text"
input:
messages:
- - role: user
content: "What's the weather like in San Francisco?"
tools:
- function:
description: Get the current weather
name: get_weather
parameters:
type: object
properties:
location:
description: "The city and state (both required), e.g. San Francisco, CA."
type: string
required: ["location"]
type: function
tool_responses:
- response: "{'response': '70 degrees and foggy'}"
expected:
- num_tool_calls: 1
tool_name: get_weather
tool_arguments:
location: "San Francisco, CA"
- num_tool_calls: 0
answer: ["foggy", "70 degrees"]
- case_id: "add_product_tool"
input:
messages:
- - role: user
content: "Please add a new product with name 'Widget', price 19.99, in stock, and tags ['new', 'sale'] and give me the product id."
tools:
- function:
description: Add a new product
name: addProduct
parameters:
type: object
properties:
name:
description: "Name of the product"
type: string
price:
description: "Price of the product"
type: number
inStock:
description: "Availability status of the product."
type: boolean
tags:
description: "List of product tags"
type: array
items:
type: string
required: ["name", "price", "inStock"]
type: function
tool_responses:
- response: "{'response': 'Successfully added product with id: 123'}"
expected:
- num_tool_calls: 1
tool_name: addProduct
tool_arguments:
name: "Widget"
price: 19.99
inStock: true
tags:
- "new"
- "sale"
- num_tool_calls: 0
answer: ["123", "product id: 123"]
- case_id: "get_then_create_event_tool"
input:
messages:
- - role: system
content: "Todays date is 2025-03-01."
- role: user
content: "Do i have any meetings on March 3rd at 10 am? Yes or no?"
- - role: user
content: "Alright then, Create an event named 'Team Building', scheduled for that time same time, in the 'Main Conference Room' and add Alice, Bob, Charlie to it. Give me the created event id."
tools:
- function:
description: Create a new event
name: create_event
parameters:
type: object
properties:
name:
description: "Name of the event"
type: string
date:
description: "Date of the event in ISO format"
type: string
time:
description: "Event Time (HH:MM)"
type: string
location:
description: "Location of the event"
type: string
participants:
description: "List of participant names"
type: array
items:
type: string
required: ["name", "date", "time", "location", "participants"]
type: function
- function:
description: Get an event by date and time
name: get_event
parameters:
type: object
properties:
date:
description: "Date of the event in ISO format"
type: string
time:
description: "Event Time (HH:MM)"
type: string
required: ["date", "time"]
type: function
tool_responses:
- response: "{'response': 'No events found for 2025-03-03 at 10:00'}"
- response: "{'response': 'Successfully created new event with id: e_123'}"
expected:
- num_tool_calls: 1
tool_name: get_event
tool_arguments:
date: "2025-03-03"
time: "10:00"
- num_tool_calls: 0
answer: ["no", "no events found", "no meetings"]
- num_tool_calls: 1
tool_name: create_event
tool_arguments:
name: "Team Building"
date: "2025-03-03"
time: "10:00"
location: "Main Conference Room"
participants:
- "Alice"
- "Bob"
- "Charlie"
- num_tool_calls: 0
answer: ["e_123", "event id: e_123"]
- case_id: "compare_monthly_expense_tool"
input:
messages:
- - role: system
content: "Todays date is 2025-03-01."
- role: user
content: "what was my monthly expense in Jan of this year?"
- - role: user
content: "Was it less than Feb of last year? Only answer with yes or no."
tools:
- function:
description: Get monthly expense summary
name: getMonthlyExpenseSummary
parameters:
type: object
properties:
month:
description: "Month of the year (1-12)"
type: integer
year:
description: "Year"
type: integer
required: ["month", "year"]
type: function
tool_responses:
- response: "{'response': 'Total expenses for January 2025: $1000'}"
- response: "{'response': 'Total expenses for February 2024: $2000'}"
expected:
- num_tool_calls: 1
tool_name: getMonthlyExpenseSummary
tool_arguments:
month: 1
year: 2025
- num_tool_calls: 0
answer: ["1000", "$1,000", "1,000"]
- num_tool_calls: 1
tool_name: getMonthlyExpenseSummary
tool_arguments:
month: 2
year: 2024
- num_tool_calls: 0
answer: ["yes"]


@ -1,166 +0,0 @@
test_response_basic:
test_name: test_response_basic
test_params:
case:
- case_id: "earth"
input: "Which planet do humans live on?"
output: "earth"
- case_id: "saturn"
input: "Which planet has rings around it with a name starting with letter S?"
output: "saturn"
- case_id: "image_input"
input:
- role: user
content:
- type: input_text
text: "what teams are playing in this image?"
- role: user
content:
- type: input_image
image_url: "https://upload.wikimedia.org/wikipedia/commons/3/3b/LeBron_James_Layup_%28Cleveland_vs_Brooklyn_2018%29.jpg"
output: "brooklyn nets"
test_response_multi_turn:
test_name: test_response_multi_turn
test_params:
case:
- case_id: "earth"
turns:
- input: "Which planet do humans live on?"
output: "earth"
- input: "What is the name of the planet from your previous response?"
output: "earth"
test_response_web_search:
test_name: test_response_web_search
test_params:
case:
- case_id: "llama_experts"
input: "How many experts does the Llama 4 Maverick model have?"
tools:
- type: web_search
search_context_size: "low"
output: "128"
test_response_file_search:
test_name: test_response_file_search
test_params:
case:
- case_id: "llama_experts"
input: "How many experts does the Llama 4 Maverick model have?"
tools:
- type: file_search
# vector_store_ids param for file_search tool gets added by the test runner
file_content: "Llama 4 Maverick has 128 experts"
output: "128"
- case_id: "llama_experts_pdf"
input: "How many experts does the Llama 4 Maverick model have?"
tools:
- type: file_search
# vector_store_ids param for file_search tool gets added by the test runner
file_path: "pdfs/llama_stack_and_models.pdf"
output: "128"
test_response_mcp_tool:
test_name: test_response_mcp_tool
test_params:
case:
- case_id: "boiling_point_tool"
input: "What is the boiling point of myawesomeliquid in Celsius?"
tools:
- type: mcp
server_label: "localmcp"
server_url: "<FILLED_BY_TEST_RUNNER>"
output: "Hello, world!"
test_response_custom_tool:
test_name: test_response_custom_tool
test_params:
case:
- case_id: "sf_weather"
input: "What's the weather like in San Francisco?"
tools:
- type: function
name: get_weather
description: Get current temperature for a given location.
parameters:
additionalProperties: false
properties:
location:
description: "City and country e.g. Bogot\xE1, Colombia"
type: string
required:
- location
type: object
test_response_image:
test_name: test_response_image
test_params:
case:
- case_id: "llama_image"
input:
- role: user
content:
- type: input_text
text: "Identify the type of animal in this image."
- type: input_image
image_url: "https://upload.wikimedia.org/wikipedia/commons/f/f7/Llamas%2C_Vernagt-Stausee%2C_Italy.jpg"
output: "llama"
# the models are really poor at tool calling after seeing images :/
test_response_multi_turn_image:
test_name: test_response_multi_turn_image
test_params:
case:
- case_id: "llama_image_understanding"
turns:
- input:
- role: user
content:
- type: input_text
text: "What type of animal is in this image? Please respond with a single word that starts with the letter 'L'."
- type: input_image
image_url: "https://upload.wikimedia.org/wikipedia/commons/f/f7/Llamas%2C_Vernagt-Stausee%2C_Italy.jpg"
output: "llama"
- input: "What country do you find this animal primarily in? What continent?"
output: "peru"
test_response_multi_turn_tool_execution:
test_name: test_response_multi_turn_tool_execution
test_params:
case:
- case_id: "user_file_access_check"
input: "I need to check if user 'alice' can access the file 'document.txt'. First, get alice's user ID, then check if that user ID can access the file 'document.txt'. Do this as a series of steps, where each step is a separate message. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response."
tools:
- type: mcp
server_label: "localmcp"
server_url: "<FILLED_BY_TEST_RUNNER>"
output: "yes"
- case_id: "experiment_results_lookup"
input: "I need to get the results for the 'boiling_point' experiment. First, get the experiment ID for 'boiling_point', then use that ID to get the experiment results. Tell me the boiling point in Celsius."
tools:
- type: mcp
server_label: "localmcp"
server_url: "<FILLED_BY_TEST_RUNNER>"
output: "100°C"
test_response_multi_turn_tool_execution_streaming:
test_name: test_response_multi_turn_tool_execution_streaming
test_params:
case:
- case_id: "user_permissions_workflow"
input: "Help me with this security check: First, get the user ID for 'charlie', then get the permissions for that user ID, and finally check if that user can access 'secret_file.txt'. Stream your progress as you work through each step. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response."
tools:
- type: mcp
server_label: "localmcp"
server_url: "<FILLED_BY_TEST_RUNNER>"
stream: true
output: "no"
- case_id: "experiment_analysis_streaming"
input: "I need a complete analysis: First, get the experiment ID for 'chemical_reaction', then get the results for that experiment, and tell me if the yield was above 80%. Return only one tool call per step. Please stream your analysis process."
tools:
- type: mcp
server_label: "localmcp"
server_url: "<FILLED_BY_TEST_RUNNER>"
stream: true
output: "85%"

View file

@ -0,0 +1,64 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import time
def new_vector_store(openai_client, name):
"""Create a new vector store, cleaning up any existing one with the same name."""
# Ensure we don't reuse an existing vector store
vector_stores = openai_client.vector_stores.list()
for vector_store in vector_stores:
if vector_store.name == name:
openai_client.vector_stores.delete(vector_store_id=vector_store.id)
# Create a new vector store
vector_store = openai_client.vector_stores.create(name=name)
return vector_store
def upload_file(openai_client, name, file_path):
"""Upload a file, cleaning up any existing file with the same name."""
# Ensure we don't reuse an existing file
files = openai_client.files.list()
for file in files:
if file.filename == name:
openai_client.files.delete(file_id=file.id)
# Upload a text file with our document content, closing the handle when done
with open(file_path, "rb") as f:
    return openai_client.files.create(file=f, purpose="assistants")
def wait_for_file_attachment(compat_client, vector_store_id, file_id):
"""Wait for a file to be attached to a vector store."""
file_attach_response = compat_client.vector_stores.files.retrieve(
vector_store_id=vector_store_id,
file_id=file_id,
)
while file_attach_response.status == "in_progress":
time.sleep(0.1)
file_attach_response = compat_client.vector_stores.files.retrieve(
vector_store_id=vector_store_id,
file_id=file_id,
)
assert file_attach_response.status == "completed", f"Expected file to be attached, got {file_attach_response}"
assert not file_attach_response.last_error
return file_attach_response
def setup_mcp_tools(tools, mcp_server_info):
"""Replace placeholder MCP server URLs with actual server info."""
# Create a deep copy to avoid modifying the original test case
import copy
tools_copy = copy.deepcopy(tools)
for tool in tools_copy:
if tool["type"] == "mcp" and tool["server_url"] == "<FILLED_BY_TEST_RUNNER>":
tool["server_url"] = mcp_server_info["server_url"]
return tools_copy
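Taken together, these helpers cover the usual setup flow for file-search tests. A minimal sketch of how they might compose, assuming `client` is an OpenAI-compatible client pointed at a Llama Stack server (names below are illustrative):

# Hedged sketch: `client` and the file path are assumptions, not part of the
# helpers module itself.
vector_store = new_vector_store(client, "docs_store")
uploaded = upload_file(client, "notes.txt", "/tmp/notes.txt")
client.vector_stores.files.create(vector_store_id=vector_store.id, file_id=uploaded.id)
wait_for_file_attachment(client, vector_store.id, uploaded.id)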

View file

@ -0,0 +1,145 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from typing import Any
class StreamingValidator:
"""Helper class for validating streaming response events."""
def __init__(self, chunks: list[Any]):
self.chunks = chunks
self.event_types = [chunk.type for chunk in chunks]
def assert_basic_event_sequence(self):
"""Verify basic created -> completed event sequence."""
assert len(self.chunks) >= 2, f"Expected at least 2 chunks (created + completed), got {len(self.chunks)}"
assert self.chunks[0].type == "response.created", (
f"First chunk should be response.created, got {self.chunks[0].type}"
)
assert self.chunks[-1].type == "response.completed", (
f"Last chunk should be response.completed, got {self.chunks[-1].type}"
)
# Verify event order
created_index = self.event_types.index("response.created")
completed_index = self.event_types.index("response.completed")
assert created_index < completed_index, "response.created should come before response.completed"
def assert_response_consistency(self):
"""Verify response ID consistency across events."""
response_ids = set()
for chunk in self.chunks:
if hasattr(chunk, "response_id"):
response_ids.add(chunk.response_id)
elif hasattr(chunk, "response") and hasattr(chunk.response, "id"):
response_ids.add(chunk.response.id)
assert len(response_ids) == 1, f"All events should reference the same response_id, found: {response_ids}"
def assert_has_incremental_content(self):
"""Verify that content is delivered incrementally via delta events."""
delta_events = [
i for i, event_type in enumerate(self.event_types) if event_type == "response.output_text.delta"
]
assert len(delta_events) > 0, "Expected delta events for true incremental streaming, but found none"
# Verify delta events have content
non_empty_deltas = 0
delta_content_total = ""
for delta_idx in delta_events:
chunk = self.chunks[delta_idx]
if hasattr(chunk, "delta") and chunk.delta:
delta_content_total += chunk.delta
non_empty_deltas += 1
assert non_empty_deltas > 0, "Delta events found but none contain content"
assert len(delta_content_total) > 0, "Delta events found but total delta content is empty"
return delta_content_total
def assert_content_quality(self, expected_content: str):
"""Verify the final response contains expected content."""
final_chunk = self.chunks[-1]
if hasattr(final_chunk, "response"):
output_text = final_chunk.response.output_text.lower().strip()
assert len(output_text) > 0, "Response should have content"
assert expected_content.lower() in output_text, f"Expected '{expected_content}' in response"
def assert_has_tool_calls(self):
"""Verify tool call streaming events are present."""
# Check for tool call events
delta_events = [
chunk
for chunk in self.chunks
if chunk.type in ["response.function_call_arguments.delta", "response.mcp_call.arguments.delta"]
]
done_events = [
chunk
for chunk in self.chunks
if chunk.type in ["response.function_call_arguments.done", "response.mcp_call.arguments.done"]
]
assert len(delta_events) > 0, f"Expected tool call delta events, got chunk types: {self.event_types}"
assert len(done_events) > 0, f"Expected tool call done events, got chunk types: {self.event_types}"
# Verify output item events
item_added_events = [chunk for chunk in self.chunks if chunk.type == "response.output_item.added"]
item_done_events = [chunk for chunk in self.chunks if chunk.type == "response.output_item.done"]
assert len(item_added_events) > 0, (
f"Expected response.output_item.added events, got chunk types: {self.event_types}"
)
assert len(item_done_events) > 0, (
f"Expected response.output_item.done events, got chunk types: {self.event_types}"
)
def assert_has_mcp_events(self):
"""Verify MCP-specific streaming events are present."""
# Tool execution progress events
mcp_in_progress_events = [chunk for chunk in self.chunks if chunk.type == "response.mcp_call.in_progress"]
mcp_completed_events = [chunk for chunk in self.chunks if chunk.type == "response.mcp_call.completed"]
assert len(mcp_in_progress_events) > 0, (
f"Expected response.mcp_call.in_progress events, got chunk types: {self.event_types}"
)
assert len(mcp_completed_events) > 0, (
f"Expected response.mcp_call.completed events, got chunk types: {self.event_types}"
)
# MCP list tools events
mcp_list_tools_in_progress_events = [
chunk for chunk in self.chunks if chunk.type == "response.mcp_list_tools.in_progress"
]
mcp_list_tools_completed_events = [
chunk for chunk in self.chunks if chunk.type == "response.mcp_list_tools.completed"
]
assert len(mcp_list_tools_in_progress_events) > 0, (
f"Expected response.mcp_list_tools.in_progress events, got chunk types: {self.event_types}"
)
assert len(mcp_list_tools_completed_events) > 0, (
f"Expected response.mcp_list_tools.completed events, got chunk types: {self.event_types}"
)
def assert_rich_streaming(self, min_chunks: int = 10):
"""Verify we have substantial streaming activity."""
assert len(self.chunks) > min_chunks, (
f"Expected rich streaming with many events, got only {len(self.chunks)} chunks"
)
def validate_event_structure(self):
"""Validate the structure of various event types."""
for chunk in self.chunks:
if chunk.type == "response.created":
assert chunk.response.status == "in_progress"
elif chunk.type == "response.completed":
assert chunk.response.status == "completed"
elif hasattr(chunk, "item_id"):
assert chunk.item_id, "Events with item_id should have non-empty item_id"
elif hasattr(chunk, "sequence_number"):
assert isinstance(chunk.sequence_number, int), "sequence_number should be an integer"
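A typical call pattern for the validator, assuming `chunks` has been accumulated from a streaming Responses API call (sketch only):

# Hedged sketch: `chunks` is assumed to be the list of streaming events
# collected by the caller; the validator itself performs no I/O.
validator = StreamingValidator(chunks)
validator.assert_basic_event_sequence()
validator.assert_response_consistency()
delta_text = validator.assert_has_incremental_content()
assert delta_text  # accumulated delta content should be non-empty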

View file

@ -0,0 +1,188 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import time
import pytest
from fixtures.test_cases import basic_test_cases, image_test_cases, multi_turn_image_test_cases, multi_turn_test_cases
from streaming_assertions import StreamingValidator
@pytest.mark.parametrize("case", basic_test_cases)
def test_response_non_streaming_basic(compat_client, text_model_id, case):
response = compat_client.responses.create(
model=text_model_id,
input=case.input,
stream=False,
)
output_text = response.output_text.lower().strip()
assert len(output_text) > 0
assert case.expected.lower() in output_text
retrieved_response = compat_client.responses.retrieve(response_id=response.id)
assert retrieved_response.output_text == response.output_text
next_response = compat_client.responses.create(
model=text_model_id,
input="Repeat your previous response in all caps.",
previous_response_id=response.id,
)
next_output_text = next_response.output_text.strip()
assert case.expected.upper() in next_output_text
@pytest.mark.parametrize("case", basic_test_cases)
def test_response_streaming_basic(compat_client, text_model_id, case):
response = compat_client.responses.create(
model=text_model_id,
input=case.input,
stream=True,
)
# Track events and timing to verify proper streaming
events = []
event_times = []
response_id = ""
start_time = time.time()
for chunk in response:
current_time = time.time()
event_times.append(current_time - start_time)
events.append(chunk)
if chunk.type == "response.created":
# Verify response.created is emitted first and immediately
assert len(events) == 1, "response.created should be the first event"
assert event_times[0] < 0.1, "response.created should be emitted immediately"
assert chunk.response.status == "in_progress"
response_id = chunk.response.id
elif chunk.type == "response.completed":
# Verify response.completed comes after response.created
assert len(events) >= 2, "response.completed should come after response.created"
assert chunk.response.status == "completed"
assert chunk.response.id == response_id, "Response ID should be consistent"
# Verify content quality
output_text = chunk.response.output_text.lower().strip()
assert len(output_text) > 0, "Response should have content"
assert case.expected.lower() in output_text, f"Expected '{case.expected}' in response"
# Use validator for common checks
validator = StreamingValidator(events)
validator.assert_basic_event_sequence()
validator.assert_response_consistency()
# Verify stored response matches streamed response
retrieved_response = compat_client.responses.retrieve(response_id=response_id)
final_event = events[-1]
assert retrieved_response.output_text == final_event.response.output_text
@pytest.mark.parametrize("case", basic_test_cases)
def test_response_streaming_incremental_content(compat_client, text_model_id, case):
"""Test that streaming actually delivers content incrementally, not just at the end."""
response = compat_client.responses.create(
model=text_model_id,
input=case.input,
stream=True,
)
# Track all events and their content to verify incremental streaming
events = []
content_snapshots = []
event_times = []
start_time = time.time()
for chunk in response:
current_time = time.time()
event_times.append(current_time - start_time)
events.append(chunk)
# Track content at each event based on event type
if chunk.type == "response.output_text.delta":
# For delta events, track the delta content
content_snapshots.append(chunk.delta)
elif hasattr(chunk, "response") and hasattr(chunk.response, "output_text"):
# For response.created/completed events, track the full output_text
content_snapshots.append(chunk.response.output_text)
else:
content_snapshots.append("")
validator = StreamingValidator(events)
validator.assert_basic_event_sequence()
# Check if we have incremental content updates
event_types = [event.type for event in events]
created_index = event_types.index("response.created")
completed_index = event_types.index("response.completed")
# The key test: verify content progression
created_content = content_snapshots[created_index]
completed_content = content_snapshots[completed_index]
# Verify that response.created has empty or minimal content
assert len(created_content) == 0, f"response.created should have empty content, got: {repr(created_content[:100])}"
# Verify that response.completed has the full content
assert len(completed_content) > 0, "response.completed should have content"
assert case.expected.lower() in completed_content.lower(), f"Expected '{case.expected}' in final content"
# Use validator for incremental content checks
delta_content_total = validator.assert_has_incremental_content()
# Verify that the accumulated delta content matches the final content
assert delta_content_total.strip() == completed_content.strip(), (
f"Delta content '{delta_content_total}' should match final content '{completed_content}'"
)
# Verify timing: delta events should come between created and completed
delta_events = [i for i, event_type in enumerate(event_types) if event_type == "response.output_text.delta"]
for delta_idx in delta_events:
assert created_index < delta_idx < completed_index, (
f"Delta event at index {delta_idx} should be between created ({created_index}) and completed ({completed_index})"
)
@pytest.mark.parametrize("case", multi_turn_test_cases)
def test_response_non_streaming_multi_turn(compat_client, text_model_id, case):
previous_response_id = None
for turn_input, turn_expected in case.turns:
response = compat_client.responses.create(
model=text_model_id,
input=turn_input,
previous_response_id=previous_response_id,
)
previous_response_id = response.id
output_text = response.output_text.lower()
assert turn_expected.lower() in output_text
@pytest.mark.parametrize("case", image_test_cases)
def test_response_non_streaming_image(compat_client, text_model_id, case):
response = compat_client.responses.create(
model=text_model_id,
input=case.input,
stream=False,
)
output_text = response.output_text.lower()
assert case.expected.lower() in output_text
@pytest.mark.parametrize("case", multi_turn_image_test_cases)
def test_response_non_streaming_multi_turn_image(compat_client, text_model_id, case):
previous_response_id = None
for turn_input, turn_expected in case.turns:
response = compat_client.responses.create(
model=text_model_id,
input=turn_input,
previous_response_id=previous_response_id,
)
previous_response_id = response.id
output_text = response.output_text.lower()
assert turn_expected.lower() in output_text

View file

@ -0,0 +1,318 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import json
import time
import pytest
from llama_stack import LlamaStackAsLibraryClient
from .helpers import new_vector_store, upload_file
@pytest.mark.parametrize(
"text_format",
# Not testing json_object because most providers don't actually support it.
[
{"type": "text"},
{
"type": "json_schema",
"name": "capitals",
"description": "A schema for the capital of each country",
"schema": {"type": "object", "properties": {"capital": {"type": "string"}}},
"strict": True,
},
],
)
def test_response_text_format(compat_client, text_model_id, text_format):
if isinstance(compat_client, LlamaStackAsLibraryClient):
pytest.skip("Responses API text format is not yet supported in library client.")
stream = False
response = compat_client.responses.create(
model=text_model_id,
input="What is the capital of France?",
stream=stream,
text={"format": text_format},
)
# by_alias=True is needed because otherwise Pydantic renames our "schema" field
assert response.text.format.model_dump(exclude_none=True, by_alias=True) == text_format
assert "paris" in response.output_text.lower()
if text_format["type"] == "json_schema":
assert "paris" in json.loads(response.output_text)["capital"].lower()
@pytest.fixture
def vector_store_with_filtered_files(compat_client, text_model_id, tmp_path_factory):
"""Create a vector store with multiple files that have different attributes for filtering tests."""
if isinstance(compat_client, LlamaStackAsLibraryClient):
pytest.skip("Responses API file search is not yet supported in library client.")
vector_store = new_vector_store(compat_client, "test_vector_store_with_filters")
tmp_path = tmp_path_factory.mktemp("filter_test_files")
# Create multiple files with different attributes
files_data = [
{
"name": "us_marketing_q1.txt",
"content": "US promotional campaigns for Q1 2023. Revenue increased by 15% in the US region.",
"attributes": {
"region": "us",
"category": "marketing",
"date": 1672531200, # Jan 1, 2023
},
},
{
"name": "us_engineering_q2.txt",
"content": "US technical updates for Q2 2023. New features deployed in the US region.",
"attributes": {
"region": "us",
"category": "engineering",
"date": 1680307200, # Apr 1, 2023
},
},
{
"name": "eu_marketing_q1.txt",
"content": "European advertising campaign results for Q1 2023. Strong growth in EU markets.",
"attributes": {
"region": "eu",
"category": "marketing",
"date": 1672531200, # Jan 1, 2023
},
},
{
"name": "asia_sales_q3.txt",
"content": "Asia Pacific revenue figures for Q3 2023. Record breaking quarter in Asia.",
"attributes": {
"region": "asia",
"category": "sales",
"date": 1688169600, # Jul 1, 2023
},
},
]
file_ids = []
for file_data in files_data:
# Create file
file_path = tmp_path / file_data["name"]
file_path.write_text(file_data["content"])
# Upload file
file_response = upload_file(compat_client, file_data["name"], str(file_path))
file_ids.append(file_response.id)
# Attach file to vector store with attributes
file_attach_response = compat_client.vector_stores.files.create(
vector_store_id=vector_store.id,
file_id=file_response.id,
attributes=file_data["attributes"],
)
# Wait for attachment
while file_attach_response.status == "in_progress":
time.sleep(0.1)
file_attach_response = compat_client.vector_stores.files.retrieve(
vector_store_id=vector_store.id,
file_id=file_response.id,
)
assert file_attach_response.status == "completed"
yield vector_store
# Cleanup: delete vector store and files
try:
compat_client.vector_stores.delete(vector_store_id=vector_store.id)
for file_id in file_ids:
try:
compat_client.files.delete(file_id=file_id)
except Exception:
pass # File might already be deleted
except Exception:
pass # Best effort cleanup
def test_response_file_search_filter_by_region(compat_client, text_model_id, vector_store_with_filtered_files):
"""Test file search with region equality filter."""
tools = [
{
"type": "file_search",
"vector_store_ids": [vector_store_with_filtered_files.id],
"filters": {"type": "eq", "key": "region", "value": "us"},
}
]
response = compat_client.responses.create(
model=text_model_id,
input="What are the updates from the US region?",
tools=tools,
stream=False,
include=["file_search_call.results"],
)
# Verify file search was called with US filter
assert len(response.output) > 1
assert response.output[0].type == "file_search_call"
assert response.output[0].status == "completed"
assert response.output[0].results
# Should only return US files (not EU or Asia files)
for result in response.output[0].results:
assert "us" in result.text.lower() or "US" in result.text
# Ensure non-US regions are NOT returned
assert "european" not in result.text.lower()
assert "asia" not in result.text.lower()
def test_response_file_search_filter_by_category(compat_client, text_model_id, vector_store_with_filtered_files):
"""Test file search with category equality filter."""
tools = [
{
"type": "file_search",
"vector_store_ids": [vector_store_with_filtered_files.id],
"filters": {"type": "eq", "key": "category", "value": "marketing"},
}
]
response = compat_client.responses.create(
model=text_model_id,
input="Show me all marketing reports",
tools=tools,
stream=False,
include=["file_search_call.results"],
)
assert response.output[0].type == "file_search_call"
assert response.output[0].status == "completed"
assert response.output[0].results
# Should only return marketing files (not engineering or sales)
for result in response.output[0].results:
# Marketing files should have promotional/advertising content
assert "promotional" in result.text.lower() or "advertising" in result.text.lower()
# Ensure non-marketing categories are NOT returned
assert "technical" not in result.text.lower()
assert "revenue figures" not in result.text.lower()
def test_response_file_search_filter_by_date_range(compat_client, text_model_id, vector_store_with_filtered_files):
"""Test file search with date range filter using compound AND."""
tools = [
{
"type": "file_search",
"vector_store_ids": [vector_store_with_filtered_files.id],
"filters": {
"type": "and",
"filters": [
{
"type": "gte",
"key": "date",
"value": 1672531200, # Jan 1, 2023
},
{
"type": "lt",
"key": "date",
"value": 1680307200, # Apr 1, 2023
},
],
},
}
]
response = compat_client.responses.create(
model=text_model_id,
input="What happened in Q1 2023?",
tools=tools,
stream=False,
include=["file_search_call.results"],
)
assert response.output[0].type == "file_search_call"
assert response.output[0].status == "completed"
assert response.output[0].results
# Should only return Q1 files (not Q2 or Q3)
for result in response.output[0].results:
assert "q1" in result.text.lower()
# Ensure non-Q1 quarters are NOT returned
assert "q2" not in result.text.lower()
assert "q3" not in result.text.lower()
def test_response_file_search_filter_compound_and(compat_client, text_model_id, vector_store_with_filtered_files):
"""Test file search with compound AND filter (region AND category)."""
tools = [
{
"type": "file_search",
"vector_store_ids": [vector_store_with_filtered_files.id],
"filters": {
"type": "and",
"filters": [
{"type": "eq", "key": "region", "value": "us"},
{"type": "eq", "key": "category", "value": "engineering"},
],
},
}
]
response = compat_client.responses.create(
model=text_model_id,
input="What are the engineering updates from the US?",
tools=tools,
stream=False,
include=["file_search_call.results"],
)
assert response.output[0].type == "file_search_call"
assert response.output[0].status == "completed"
assert response.output[0].results
# Should only return US engineering files
assert len(response.output[0].results) >= 1
for result in response.output[0].results:
assert "us" in result.text.lower() and "technical" in result.text.lower()
# Ensure it's not from other regions or categories
assert "european" not in result.text.lower() and "asia" not in result.text.lower()
assert "promotional" not in result.text.lower() and "revenue" not in result.text.lower()
def test_response_file_search_filter_compound_or(compat_client, text_model_id, vector_store_with_filtered_files):
"""Test file search with compound OR filter (marketing OR sales)."""
tools = [
{
"type": "file_search",
"vector_store_ids": [vector_store_with_filtered_files.id],
"filters": {
"type": "or",
"filters": [
{"type": "eq", "key": "category", "value": "marketing"},
{"type": "eq", "key": "category", "value": "sales"},
],
},
}
]
response = compat_client.responses.create(
model=text_model_id,
input="Show me marketing and sales documents",
tools=tools,
stream=False,
include=["file_search_call.results"],
)
assert response.output[0].type == "file_search_call"
assert response.output[0].status == "completed"
assert response.output[0].results
# Should return marketing and sales files, but NOT engineering
categories_found = set()
for result in response.output[0].results:
text_lower = result.text.lower()
if "promotional" in text_lower or "advertising" in text_lower:
categories_found.add("marketing")
if "revenue figures" in text_lower:
categories_found.add("sales")
# Ensure engineering files are NOT returned
assert "technical" not in text_lower, f"Engineering file should not be returned, but got: {result.text}"
# Verify we got at least one of the expected categories
assert len(categories_found) > 0, "Should have found at least one marketing or sales file"
assert categories_found.issubset({"marketing", "sales"}), f"Found unexpected categories: {categories_found}"
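The filter grammar exercised in these tests composes leaf comparisons (eq, gte, lt) under and/or nodes. A compound filter for, say, US marketing documents from Q1 2023 might look like the following (a sketch mirroring the shapes used above, not an exhaustive grammar):

# Hedged sketch: combines the leaf and compound filter shapes from the tests.
filters = {
    "type": "and",
    "filters": [
        {"type": "eq", "key": "region", "value": "us"},
        {"type": "eq", "key": "category", "value": "marketing"},
        {"type": "gte", "key": "date", "value": 1672531200},  # Jan 1, 2023
        {"type": "lt", "key": "date", "value": 1680307200},  # Apr 1, 2023
    ],
}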

File diff suppressed because it is too large

View file

@ -0,0 +1,335 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import json
import os
import httpx
import openai
import pytest
from fixtures.test_cases import (
custom_tool_test_cases,
file_search_test_cases,
mcp_tool_test_cases,
multi_turn_tool_execution_streaming_test_cases,
multi_turn_tool_execution_test_cases,
web_search_test_cases,
)
from helpers import new_vector_store, setup_mcp_tools, upload_file, wait_for_file_attachment
from streaming_assertions import StreamingValidator
from llama_stack import LlamaStackAsLibraryClient
from llama_stack.core.datatypes import AuthenticationRequiredError
from tests.common.mcp import dependency_tools, make_mcp_server
@pytest.mark.parametrize("case", web_search_test_cases)
def test_response_non_streaming_web_search(compat_client, text_model_id, case):
response = compat_client.responses.create(
model=text_model_id,
input=case.input,
tools=case.tools,
stream=False,
)
assert len(response.output) > 1
assert response.output[0].type == "web_search_call"
assert response.output[0].status == "completed"
assert response.output[1].type == "message"
assert response.output[1].status == "completed"
assert response.output[1].role == "assistant"
assert len(response.output[1].content) > 0
assert case.expected.lower() in response.output_text.lower().strip()
@pytest.mark.parametrize("case", file_search_test_cases)
def test_response_non_streaming_file_search(compat_client, text_model_id, tmp_path, case):
if isinstance(compat_client, LlamaStackAsLibraryClient):
pytest.skip("Responses API file search is not yet supported in library client.")
vector_store = new_vector_store(compat_client, "test_vector_store")
if case.file_content:
file_name = "test_response_non_streaming_file_search.txt"
file_path = tmp_path / file_name
file_path.write_text(case.file_content)
elif case.file_path:
file_path = os.path.join(os.path.dirname(__file__), "fixtures", case.file_path)
file_name = os.path.basename(file_path)
else:
raise ValueError("No file content or path provided for case")
file_response = upload_file(compat_client, file_name, file_path)
# Attach our file to the vector store
compat_client.vector_stores.files.create(
vector_store_id=vector_store.id,
file_id=file_response.id,
)
# Wait for the file to be attached
wait_for_file_attachment(compat_client, vector_store.id, file_response.id)
# Update our tools with the right vector store id
tools = case.tools
for tool in tools:
if tool["type"] == "file_search":
tool["vector_store_ids"] = [vector_store.id]
# Create the response request, which should query our vector store
response = compat_client.responses.create(
model=text_model_id,
input=case.input,
tools=tools,
stream=False,
include=["file_search_call.results"],
)
# Verify the file_search_tool was called
assert len(response.output) > 1
assert response.output[0].type == "file_search_call"
assert response.output[0].status == "completed"
assert response.output[0].queries # ensure it's some non-empty list
assert response.output[0].results
assert case.expected.lower() in response.output[0].results[0].text.lower()
assert response.output[0].results[0].score > 0
# Verify the output_text generated by the response
assert case.expected.lower() in response.output_text.lower().strip()
def test_response_non_streaming_file_search_empty_vector_store(compat_client, text_model_id):
if isinstance(compat_client, LlamaStackAsLibraryClient):
pytest.skip("Responses API file search is not yet supported in library client.")
vector_store = new_vector_store(compat_client, "test_vector_store")
# Create the response request, which should query our vector store
response = compat_client.responses.create(
model=text_model_id,
input="How many experts does the Llama 4 Maverick model have?",
tools=[{"type": "file_search", "vector_store_ids": [vector_store.id]}],
stream=False,
include=["file_search_call.results"],
)
# Verify the file_search_tool was called
assert len(response.output) > 1
assert response.output[0].type == "file_search_call"
assert response.output[0].status == "completed"
assert response.output[0].queries # ensure it's some non-empty list
assert not response.output[0].results # ensure we don't get any results
# Verify some output_text was generated by the response
assert response.output_text
@pytest.mark.parametrize("case", mcp_tool_test_cases)
def test_response_non_streaming_mcp_tool(compat_client, text_model_id, case):
if not isinstance(compat_client, LlamaStackAsLibraryClient):
pytest.skip("in-process MCP server is only supported in library client")
with make_mcp_server() as mcp_server_info:
tools = setup_mcp_tools(case.tools, mcp_server_info)
response = compat_client.responses.create(
model=text_model_id,
input=case.input,
tools=tools,
stream=False,
)
assert len(response.output) >= 3
list_tools = response.output[0]
assert list_tools.type == "mcp_list_tools"
assert list_tools.server_label == "localmcp"
assert len(list_tools.tools) == 2
assert {t.name for t in list_tools.tools} == {
"get_boiling_point",
"greet_everyone",
}
call = response.output[1]
assert call.type == "mcp_call"
assert call.name == "get_boiling_point"
assert json.loads(call.arguments) == {
"liquid_name": "myawesomeliquid",
"celsius": True,
}
assert call.error is None
assert "-100" in call.output
# sometimes the model will call the tool again, so we need to get the last message
message = response.output[-1]
text_content = message.content[0].text
assert "boiling point" in text_content.lower()
with make_mcp_server(required_auth_token="test-token") as mcp_server_info:
tools = setup_mcp_tools(case.tools, mcp_server_info)
exc_type = (
AuthenticationRequiredError
if isinstance(compat_client, LlamaStackAsLibraryClient)
else (httpx.HTTPStatusError, openai.AuthenticationError)
)
with pytest.raises(exc_type):
compat_client.responses.create(
model=text_model_id,
input=case.input,
tools=tools,
stream=False,
)
for tool in tools:
if tool["type"] == "mcp":
tool["headers"] = {"Authorization": "Bearer test-token"}
response = compat_client.responses.create(
model=text_model_id,
input=case.input,
tools=tools,
stream=False,
)
assert len(response.output) >= 3
@pytest.mark.parametrize("case", custom_tool_test_cases)
def test_response_non_streaming_custom_tool(compat_client, text_model_id, case):
response = compat_client.responses.create(
model=text_model_id,
input=case.input,
tools=case.tools,
stream=False,
)
assert len(response.output) == 1
assert response.output[0].type == "function_call"
assert response.output[0].status == "completed"
assert response.output[0].name == "get_weather"
@pytest.mark.parametrize("case", multi_turn_tool_execution_test_cases)
def test_response_non_streaming_multi_turn_tool_execution(compat_client, text_model_id, case):
"""Test multi-turn tool execution where multiple MCP tool calls are performed in sequence."""
if not isinstance(compat_client, LlamaStackAsLibraryClient):
pytest.skip("in-process MCP server is only supported in library client")
with make_mcp_server(tools=dependency_tools()) as mcp_server_info:
tools = setup_mcp_tools(case.tools, mcp_server_info)
response = compat_client.responses.create(
input=case.input,
model=text_model_id,
tools=tools,
)
# Verify we have MCP tool calls in the output
mcp_list_tools = [output for output in response.output if output.type == "mcp_list_tools"]
mcp_calls = [output for output in response.output if output.type == "mcp_call"]
message_outputs = [output for output in response.output if output.type == "message"]
# Should have exactly 1 MCP list tools message (at the beginning)
assert len(mcp_list_tools) == 1, f"Expected exactly 1 mcp_list_tools, got {len(mcp_list_tools)}"
assert mcp_list_tools[0].server_label == "localmcp"
assert len(mcp_list_tools[0].tools) == 5 # Updated for dependency tools
expected_tool_names = {
"get_user_id",
"get_user_permissions",
"check_file_access",
"get_experiment_id",
"get_experiment_results",
}
assert {t.name for t in mcp_list_tools[0].tools} == expected_tool_names
assert len(mcp_calls) >= 1, f"Expected at least 1 mcp_call, got {len(mcp_calls)}"
for mcp_call in mcp_calls:
assert mcp_call.error is None, f"MCP call should not have errors, got: {mcp_call.error}"
assert len(message_outputs) >= 1, f"Expected at least 1 message output, got {len(message_outputs)}"
final_message = message_outputs[-1]
assert final_message.role == "assistant", f"Final message should be from assistant, got {final_message.role}"
assert final_message.status == "completed", f"Final message should be completed, got {final_message.status}"
assert len(final_message.content) > 0, "Final message should have content"
expected_output = case.expected
assert expected_output.lower() in response.output_text.lower(), (
f"Expected '{expected_output}' to appear in response: {response.output_text}"
)
@pytest.mark.parametrize("case", multi_turn_tool_execution_streaming_test_cases)
def test_response_streaming_multi_turn_tool_execution(compat_client, text_model_id, case):
"""Test streaming multi-turn tool execution where multiple MCP tool calls are performed in sequence."""
if not isinstance(compat_client, LlamaStackAsLibraryClient):
pytest.skip("in-process MCP server is only supported in library client")
with make_mcp_server(tools=dependency_tools()) as mcp_server_info:
tools = setup_mcp_tools(case.tools, mcp_server_info)
stream = compat_client.responses.create(
input=case.input,
model=text_model_id,
tools=tools,
stream=True,
)
chunks = []
for chunk in stream:
chunks.append(chunk)
# Use validator for common streaming checks
validator = StreamingValidator(chunks)
validator.assert_basic_event_sequence()
validator.assert_response_consistency()
validator.assert_has_tool_calls()
validator.assert_has_mcp_events()
validator.assert_rich_streaming()
# Get the final response from the last chunk
final_chunk = chunks[-1]
if hasattr(final_chunk, "response"):
final_response = final_chunk.response
# Verify multi-turn MCP tool execution results
mcp_list_tools = [output for output in final_response.output if output.type == "mcp_list_tools"]
mcp_calls = [output for output in final_response.output if output.type == "mcp_call"]
message_outputs = [output for output in final_response.output if output.type == "message"]
# Should have exactly 1 MCP list tools message (at the beginning)
assert len(mcp_list_tools) == 1, f"Expected exactly 1 mcp_list_tools, got {len(mcp_list_tools)}"
assert mcp_list_tools[0].server_label == "localmcp"
assert len(mcp_list_tools[0].tools) == 5 # Updated for dependency tools
expected_tool_names = {
"get_user_id",
"get_user_permissions",
"check_file_access",
"get_experiment_id",
"get_experiment_results",
}
assert {t.name for t in mcp_list_tools[0].tools} == expected_tool_names
# Should have at least 1 MCP call (the model should call at least one tool)
assert len(mcp_calls) >= 1, f"Expected at least 1 mcp_call, got {len(mcp_calls)}"
# All MCP calls should be completed (verifies our tool execution works)
for mcp_call in mcp_calls:
assert mcp_call.error is None, f"MCP call should not have errors, got: {mcp_call.error}"
# Should have at least one final message response
assert len(message_outputs) >= 1, f"Expected at least 1 message output, got {len(message_outputs)}"
# Final message should be from assistant and completed
final_message = message_outputs[-1]
assert final_message.role == "assistant", (
f"Final message should be from assistant, got {final_message.role}"
)
assert final_message.status == "completed", f"Final message should be completed, got {final_message.status}"
assert len(final_message.content) > 0, "Final message should have content"
# Check that the expected output appears in the response
expected_output = case.expected
assert expected_output.lower() in final_response.output_text.lower(), (
f"Expected '{expected_output}' to appear in response: {final_response.output_text}"
)

View file

@ -14,7 +14,7 @@
"models": [ "models": [
{ {
"model": "nomic-embed-text:latest", "model": "nomic-embed-text:latest",
"modified_at": "2025-08-05T14:04:07.946926-07:00", "modified_at": "2025-08-14T20:26:10.795125-07:00",
"digest": "0a109f422b47e3a30ba2b10eca18548e944e8a23073ee3f3e947efcf3c45e59f", "digest": "0a109f422b47e3a30ba2b10eca18548e944e8a23073ee3f3e947efcf3c45e59f",
"size": 274302450, "size": 274302450,
"details": { "details": {

Some files were not shown because too many files have changed in this diff