chore(tests): fix responses and vector_io tests

commit 1c2ece229c
parent 1721aafc1f
Author: Ashwin Bharambe
Date: 2025-08-12 11:12:01 -07:00
12 changed files with 41 additions and 49 deletions


@@ -65,7 +65,7 @@ from llama_stack.providers.datatypes import HealthResponse, HealthStatus, Routin
 from llama_stack.providers.utils.inference.inference_store import InferenceStore
 from llama_stack.providers.utils.telemetry.tracing import get_current_span
 
-logger = get_logger(name=__name__, category="core")
+logger = get_logger(name=__name__, category="inference")
 
 
 class InferenceRouter(Inference):
@@ -854,4 +854,5 @@ class InferenceRouter(Inference):
             model=model.identifier,
             object="chat.completion",
         )
+        logger.debug(f"InferenceRouter.completion_response: {final_response}")
         await self.store.store_chat_completion(final_response, messages)
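
For context, a minimal sketch of the pattern this file ends up with: build the final chat-completion response, log it at debug level under the new "inference" category, then persist it. The ChatCompletion stand-in and finish_completion helper below are hypothetical simplifications; only get_logger, the category argument, logger.debug, and store_chat_completion appear in the diff, and the get_logger import path is assumed.

    from dataclasses import dataclass, field

    from llama_stack.log import get_logger  # import path assumed

    logger = get_logger(name=__name__, category="inference")

    @dataclass
    class ChatCompletion:  # hypothetical stand-in for the real response type
        model: str
        object: str = "chat.completion"
        choices: list = field(default_factory=list)

    async def finish_completion(store, model_id: str, messages: list, choices: list) -> ChatCompletion:
        final_response = ChatCompletion(model=model_id, choices=choices)
        # Debug level keeps routine responses out of default output while
        # still letting the "inference" category be turned up when debugging tests.
        logger.debug(f"InferenceRouter.completion_response: {final_response}")
        await store.store_chat_completion(final_response, messages)
        return final_response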


@@ -63,6 +63,8 @@ class ModelsRoutingTable(CommonRoutingTableImpl, Models):
 
     async def get_provider_impl(self, model_id: str) -> Any:
         model = await lookup_model(self, model_id)
+        if model.provider_id not in self.impls_by_provider_id:
+            raise ValueError(f"Provider {model.provider_id} not found in the routing table")
         return self.impls_by_provider_id[model.provider_id]
 
     async def register_model(
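
The guard added above turns a missing provider into an explicit error rather than a bare KeyError from the dictionary lookup. A minimal sketch of the same pattern in isolation (the routing-table and model shapes are simplified assumptions; only impls_by_provider_id and the error message come from the diff):

    from typing import Any

    class Model:  # hypothetical minimal model record
        def __init__(self, identifier: str, provider_id: str):
            self.identifier = identifier
            self.provider_id = provider_id

    class ModelsRoutingTable:
        def __init__(self, impls_by_provider_id: dict[str, Any]):
            self.impls_by_provider_id = impls_by_provider_id

        def get_provider_impl(self, model: Model) -> Any:
            # Raise a descriptive error instead of letting the dict lookup
            # fail with a bare KeyError on an unregistered provider.
            if model.provider_id not in self.impls_by_provider_id:
                raise ValueError(f"Provider {model.provider_id} not found in the routing table")
            return self.impls_by_provider_id[model.provider_id]

    # Usage: ModelsRoutingTable({}).get_provider_impl(Model("m", "ollama"))
    # raises a clear ValueError naming the missing provider.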