# What does this PR do?

- As the title says: clean up `import *`s (illustrative sketches of this and the test-robustness changes follow the checklist below).
- Upgrade tests to make them more robust to bad model outputs.
- Remove `import *`s in `llama_stack/apis/*` (skipping `__init__` modules).

<img width="465" alt="image" src="https://github.com/user-attachments/assets/d8339c13-3b40-4ba5-9c53-0d2329726ee2" />

- Ran `sh run_openapi_generator.sh`; no types are affected.

## Test Plan

### Provider Tests

**agents**
```
pytest -v -s llama_stack/providers/tests/agents/test_agents.py -m "together" --safety-shield meta-llama/Llama-Guard-3-8B --inference-model meta-llama/Llama-3.1-405B-Instruct-FP8
```

**inference**
```bash
# meta-reference
torchrun $CONDA_PREFIX/bin/pytest -v -s -k "meta_reference" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py
torchrun $CONDA_PREFIX/bin/pytest -v -s -k "meta_reference" --inference-model="meta-llama/Llama-3.2-11B-Vision-Instruct" ./llama_stack/providers/tests/inference/test_vision_inference.py

# together
pytest -v -s -k "together" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py
pytest -v -s -k "together" --inference-model="meta-llama/Llama-3.2-11B-Vision-Instruct" ./llama_stack/providers/tests/inference/test_vision_inference.py

pytest ./llama_stack/providers/tests/inference/test_prompt_adapter.py
```

**safety**
```
pytest -v -s llama_stack/providers/tests/safety/test_safety.py -m together --safety-shield meta-llama/Llama-Guard-3-8B
```

**memory**
```
pytest -v -s llama_stack/providers/tests/memory/test_memory.py -m "sentence_transformers" --env EMBEDDING_DIMENSION=384
```

**scoring**
```
pytest -v -s -m llm_as_judge_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py --judge-model meta-llama/Llama-3.2-3B-Instruct
pytest -v -s -m basic_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py
pytest -v -s -m braintrust_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py
```

**datasetio**
```
pytest -v -s -m localfs llama_stack/providers/tests/datasetio/test_datasetio.py
pytest -v -s -m huggingface llama_stack/providers/tests/datasetio/test_datasetio.py
```

**eval**
```
pytest -v -s -m meta_reference_eval_together_inference llama_stack/providers/tests/eval/test_eval.py
pytest -v -s -m meta_reference_eval_together_inference_huggingface_datasetio llama_stack/providers/tests/eval/test_eval.py
```

### Client-SDK Tests
```
LLAMA_STACK_BASE_URL=http://localhost:5000 pytest -v ./tests/client-sdk
```

### llama-stack-apps
```
PORT=5000
LOCALHOST=localhost

python -m examples.agents.hello $LOCALHOST $PORT
python -m examples.agents.inflation $LOCALHOST $PORT
python -m examples.agents.podcast_transcript $LOCALHOST $PORT
python -m examples.agents.rag_as_attachments $LOCALHOST $PORT
python -m examples.agents.rag_with_memory_bank $LOCALHOST $PORT
python -m examples.safety.llama_guard_demo_mm $LOCALHOST $PORT
python -m examples.agents.e2e_loop_with_custom_tools $LOCALHOST $PORT

# Vision model
python -m examples.interior_design_assistant.app
python -m examples.agent_store.app $LOCALHOST $PORT
```

### CLI
```
which llama
llama model prompt-format -m Llama3.2-11B-Vision-Instruct
llama model list
llama stack list-apis
llama stack list-providers inference

llama stack build --template ollama --image-type conda
```

### Distribution Tests

**ollama**
```
llama stack build --template ollama --image-type conda
ollama run llama3.2:1b-instruct-fp16
llama stack run ./llama_stack/templates/ollama/run.yaml --env INFERENCE_MODEL=meta-llama/Llama-3.2-1B-Instruct
```

**fireworks**
```
llama stack build --template fireworks --image-type conda
llama stack run ./llama_stack/templates/fireworks/run.yaml
```

**together**
```
llama stack build --template together --image-type conda
llama stack run ./llama_stack/templates/together/run.yaml
```

**tgi**
```
llama stack run ./llama_stack/templates/tgi/run.yaml --env TGI_URL=http://0.0.0.0:5009 --env INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
```

## Sources

Please link relevant resources if necessary.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
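For context on the headline change, here is a minimal sketch of the `import *` cleanup pattern. This is an illustration, not an excerpt from the diff; the imported symbols are chosen for the example.

```python
# Before: a star import pulls every public name into the module namespace,
# hiding the real dependency surface and defeating linters and IDEs.
# from llama_stack.apis.inference import *

# After: dependencies are imported explicitly, so unused or misspelled
# names are caught by tooling instead of surfacing at runtime.
from llama_stack.apis.inference import (  # symbols chosen for illustration
    ChatCompletionRequest,
    CompletionMessage,
    SamplingParams,
)
```

Similarly, one hypothetical reading of "more robust to bad model outputs" is asserting on tolerant markers rather than exact strings:

```python
# Hypothetical pattern, not taken from the PR: tolerate paraphrased model
# output by checking for any of several expected markers.
response_text = "The annual inflation rate was roughly 2.5 percent."

assert any(
    marker in response_text.lower() for marker in ("inflation", "2.5")
), f"unexpected model output: {response_text!r}"
```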
_88 lines · 2.7 KiB · Python_
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import json
import logging
import uuid
from datetime import datetime

from typing import List, Optional

from pydantic import BaseModel

from llama_stack.apis.agents import Turn
from llama_stack.providers.utils.kvstore import KVStore

log = logging.getLogger(__name__)


class AgentSessionInfo(BaseModel):
    session_id: str
    session_name: str
    memory_bank_id: Optional[str] = None
    started_at: datetime


class AgentPersistence:
    def __init__(self, agent_id: str, kvstore: KVStore):
        self.agent_id = agent_id
        self.kvstore = kvstore

    async def create_session(self, name: str) -> str:
        session_id = str(uuid.uuid4())
        session_info = AgentSessionInfo(
            session_id=session_id,
            session_name=name,
            started_at=datetime.now(),
        )
        await self.kvstore.set(
            key=f"session:{self.agent_id}:{session_id}",
            value=session_info.model_dump_json(),
        )
        return session_id

    async def get_session_info(self, session_id: str) -> Optional[AgentSessionInfo]:
        value = await self.kvstore.get(
            key=f"session:{self.agent_id}:{session_id}",
        )
        if not value:
            return None

        return AgentSessionInfo(**json.loads(value))

    async def add_memory_bank_to_session(self, session_id: str, bank_id: str):
        session_info = await self.get_session_info(session_id)
        if session_info is None:
            raise ValueError(f"Session {session_id} not found")

        session_info.memory_bank_id = bank_id
        await self.kvstore.set(
            key=f"session:{self.agent_id}:{session_id}",
            value=session_info.model_dump_json(),
        )

    async def add_turn_to_session(self, session_id: str, turn: Turn):
        await self.kvstore.set(
            key=f"session:{self.agent_id}:{session_id}:{turn.turn_id}",
            value=turn.model_dump_json(),
        )

    async def get_session_turns(self, session_id: str) -> List[Turn]:
        values = await self.kvstore.range(
            start_key=f"session:{self.agent_id}:{session_id}:",
            end_key=f"session:{self.agent_id}:{session_id}:\xff\xff\xff\xff",
        )
        turns = []
        for value in values:
            try:
                turn = Turn(**json.loads(value))
                turns.append(turn)
            except Exception as e:
                log.error(f"Error parsing turn: {e}")
                continue
        turns.sort(key=lambda x: (x.completed_at or datetime.min))
        return turns
```
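Two details of the module above are worth noting. Session metadata lives at `session:{agent_id}:{session_id}`, while each turn lives under `session:{agent_id}:{session_id}:{turn_id}`, so `get_session_turns` can recover all turns of a session with a lexicographic range scan whose end key is padded with the `\xff\xff\xff\xff` sentinel. Below is a minimal usage sketch under two assumptions: the file is importable as `persistence`, and a dict-backed stand-in replaces the real `KVStore` (only the three methods the class actually calls are mimicked).

```python
# Hypothetical driver, not part of the repository.
import asyncio

from persistence import AgentPersistence  # assumes the module above is on the path


class DictKVStore:
    """Dict-backed stand-in for KVStore with just get/set/range."""

    def __init__(self):
        self.store = {}

    async def set(self, key: str, value: str) -> None:
        self.store[key] = value

    async def get(self, key: str):
        return self.store.get(key)

    async def range(self, start_key: str, end_key: str):
        # Sorted lexicographic scan, matching the session:{agent}:{session}:{turn}
        # key schema the class relies on.
        return [v for k, v in sorted(self.store.items()) if start_key <= k <= end_key]


async def main() -> None:
    persistence = AgentPersistence(agent_id="agent-123", kvstore=DictKVStore())

    session_id = await persistence.create_session(name="demo")
    await persistence.add_memory_bank_to_session(session_id, bank_id="bank-0")

    info = await persistence.get_session_info(session_id)
    print(info.session_name, info.memory_bank_id)  # -> demo bank-0


asyncio.run(main())
```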