Kill noise from test output

This commit is contained in:
Ashwin Bharambe 2025-02-21 15:37:23 -08:00
parent bf38d0aba0
commit 45ffe87d7c
3 changed files with 4 additions and 11 deletions

View file

@@ -11,8 +11,6 @@ import tempfile
 import uuid
 from typing import AsyncGenerator, List, Optional, Union

-from termcolor import colored
-
 from llama_stack.apis.agents import (
     AgentConfig,
     AgentCreateResponse,
@@ -69,12 +67,7 @@ class MetaReferenceAgentsImpl(Agents):
         # check if "bwrap" is available
         if not shutil.which("bwrap"):
-            print(
-                colored(
-                    "Warning: `bwrap` is not available. Code interpreter tool will not work correctly.",
-                    "yellow",
-                )
-            )
+            logger.warning("Warning: `bwrap` is not available. Code interpreter tool will not work correctly.")

     async def create_agent(
         self,

View file

@@ -35,7 +35,9 @@ class SentenceTransformerEmbeddingMixin:
     ) -> EmbeddingsResponse:
         model = await self.model_store.get_model(model_id)
         embedding_model = self._load_sentence_transformer_model(model.provider_resource_id)
-        embeddings = embedding_model.encode([interleaved_content_as_str(content) for content in contents])
+        embeddings = embedding_model.encode(
+            [interleaved_content_as_str(content) for content in contents], show_progress_bar=False
+        )
         return EmbeddingsResponse(embeddings=embeddings)

     def _load_sentence_transformer_model(self, model: str) -> "SentenceTransformer":

View file

@@ -90,7 +90,6 @@ class TestClientTool(ClientTool):
 def agent_config(llama_stack_client, text_model_id):
     available_shields = [shield.identifier for shield in llama_stack_client.shields.list()]
     available_shields = available_shields[:1]
-    print(f"Using shield: {available_shields}")
     agent_config = AgentConfig(
         model=text_model_id,
         instructions="You are a helpful assistant",
@@ -489,7 +488,6 @@ def test_rag_agent(llama_stack_client, agent_config):
         ),
     ]

     for prompt, expected_kw in user_prompts:
-        print(f"User> {prompt}")
         response = rag_agent.create_turn(
             messages=[{"role": "user", "content": prompt}],
             session_id=session_id,