Add Groq inference provider

This commit is contained in:
Benjamin Klieger 2024-11-25 17:54:14 -08:00
parent 34be07e0df
commit 74a6aa2c81
6 changed files with 480 additions and 0 deletions

View file

@@ -18,6 +18,7 @@ from llama_stack.providers.inline.inference.meta_reference import (
from llama_stack.providers.remote.inference.bedrock import BedrockConfig
from llama_stack.providers.remote.inference.fireworks import FireworksImplConfig
from llama_stack.providers.remote.inference.groq import GroqImplConfig
from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
from llama_stack.providers.remote.inference.together import TogetherImplConfig
@@ -114,6 +115,21 @@ def inference_fireworks() -> ProviderFixture:
)
@pytest.fixture(scope="session")
def inference_groq() -> ProviderFixture:
    """Session-scoped fixture for the remote Groq inference provider.

    Builds a single ``remote::groq`` Provider entry whose config is a
    serialized GroqImplConfig. The API key is pulled from the
    GROQ_API_KEY environment variable via get_env_or_fail, so the
    fixture fails fast when the key is not set.
    """
    groq_config = GroqImplConfig(api_key=get_env_or_fail("GROQ_API_KEY"))
    groq_provider = Provider(
        provider_id="groq",
        provider_type="remote::groq",
        config=groq_config.model_dump(),
    )
    return ProviderFixture(providers=[groq_provider])
@pytest.fixture(scope="session")
def inference_together() -> ProviderFixture:
return ProviderFixture(
@@ -190,6 +206,7 @@ INFERENCE_FIXTURES = [
"remote",
"bedrock",
"nvidia",
"groq",
]