Add groq inference adapter.

swanhtet1992 2024-11-24 03:27:05 -06:00
parent 8920c4216f
commit 0f73a4a829
10 changed files with 809 additions and 0 deletions


@@ -23,6 +23,7 @@ from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
from llama_stack.providers.remote.inference.together import TogetherImplConfig
from llama_stack.providers.remote.inference.vllm import VLLMInferenceAdapterConfig
from llama_stack.providers.remote.inference.sambanova import SambanovaImplConfig
from llama_stack.providers.remote.inference.groq import GroqImplConfig
from llama_stack.providers.tests.resolver import construct_stack_for_test
from ..conftest import ProviderFixture, remote_stack_fixture
@@ -171,6 +172,21 @@ def inference_sambanova() -> ProviderFixture:
        ),
    )

@pytest.fixture(scope="session")
def inference_groq() -> ProviderFixture:
    return ProviderFixture(
        providers=[
            Provider(
                provider_id="groq",
                provider_type="remote::groq",
                config=GroqImplConfig().model_dump(),
            )
        ],
        provider_data=dict(
            groq_api_key=get_env_or_fail("GROQ_API_KEY"),
        ),
    )

def get_model_short_name(model_name: str) -> str:
    """Convert model name to a short test identifier.
@@ -207,6 +223,7 @@ INFERENCE_FIXTURES = [
    "bedrock",
    "nvidia",
    "sambanova",
    "groq",
]
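
As a quick illustration (not part of this diff), here is a minimal sketch of a smoke test that exercises the new fixture directly, assuming the fixtures module above is picked up by the tests' conftest and GROQ_API_KEY is set in the environment; the test name and file are hypothetical:

```python
# Hypothetical smoke test (not in this commit); it only relies on the fields
# the fixture constructs above: one Provider entry and a provider_data dict.
def test_inference_groq_fixture(request):
    # pytest's request.getfixturevalue resolves the session-scoped fixture
    # added in this commit by its name, "inference_groq".
    fixture = request.getfixturevalue("inference_groq")

    # The fixture should carry exactly one provider entry pointing at the
    # remote Groq adapter, plus the API key in provider_data.
    assert fixture.providers[0].provider_id == "groq"
    assert fixture.providers[0].provider_type == "remote::groq"
    assert "groq_api_key" in fixture.provider_data
```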