From 3cace74458bde84e9c042509220418a6bf65dbc4 Mon Sep 17 00:00:00 2001
From: Dinesh Yeduguru
Date: Sat, 23 Nov 2024 22:52:06 -0800
Subject: [PATCH] Add inference test fixture for tgi

---
 .../providers/tests/inference/fixtures.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/llama_stack/providers/tests/inference/fixtures.py b/llama_stack/providers/tests/inference/fixtures.py
index 2007818e5..683dbe772 100644
--- a/llama_stack/providers/tests/inference/fixtures.py
+++ b/llama_stack/providers/tests/inference/fixtures.py
@@ -20,6 +20,7 @@ from llama_stack.providers.remote.inference.bedrock import BedrockConfig
 from llama_stack.providers.remote.inference.fireworks import FireworksImplConfig
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
 from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
+from llama_stack.providers.remote.inference.tgi import TGIImplConfig
 from llama_stack.providers.remote.inference.together import TogetherImplConfig
 from llama_stack.providers.remote.inference.vllm import VLLMInferenceAdapterConfig
 from llama_stack.providers.tests.resolver import construct_stack_for_test
@@ -156,6 +157,22 @@ def inference_nvidia() -> ProviderFixture:
     )


+@pytest.fixture(scope="session")
+def inference_tgi() -> ProviderFixture:
+    return ProviderFixture(
+        providers=[
+            Provider(
+                provider_id="tgi",
+                provider_type="remote::tgi",
+                config=TGIImplConfig(
+                    url=get_env_or_fail("TGI_URL"),
+                    api_token=get_env_or_fail("TGI_API_TOKEN"),
+                ).model_dump(),
+            )
+        ],
+    )
+
+
 def get_model_short_name(model_name: str) -> str:
     """Convert model name to a short test identifier.

@@ -190,6 +207,7 @@ INFERENCE_FIXTURES = [
     "remote",
     "bedrock",
     "nvidia",
+    "tgi",
 ]
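
Note (not part of the patch): the new inference_tgi fixture pulls its endpoint and token from the TGI_URL and TGI_API_TOKEN environment variables via get_env_or_fail, so a test run fails fast if either is unset. A quick way to sanity-check the config wiring before running the suite is to build TGIImplConfig the same way the fixture does. A minimal sketch follows; the URL and token values are placeholders, not real endpoints:

    # Illustration only; assumes llama_stack is installed and that a TGI
    # (text-generation-inference) server would be reachable at TGI_URL.
    import os

    os.environ.setdefault("TGI_URL", "http://localhost:8080")  # placeholder
    os.environ.setdefault("TGI_API_TOKEN", "dummy-token")      # placeholder

    from llama_stack.providers.remote.inference.tgi import TGIImplConfig

    config = TGIImplConfig(
        url=os.environ["TGI_URL"],
        api_token=os.environ["TGI_API_TOKEN"],
    )
    # model_dump() produces the plain dict that the fixture above passes
    # as the Provider's config.
    print(config.model_dump())

With the environment set, "tgi" can be selected like any other entry in INFERENCE_FIXTURES; the exact pytest invocation depends on how the test harness parametrizes those fixtures (a -k filter on the fixture name is one plausible option).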