Mirror of https://github.com/meta-llama/llama-stack.git
Add groq inference adapter.

Commit 0f73a4a829 (parent 8920c4216f)
10 changed files with 809 additions and 0 deletions
@@ -23,6 +23,7 @@ from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
 from llama_stack.providers.remote.inference.together import TogetherImplConfig
 from llama_stack.providers.remote.inference.vllm import VLLMInferenceAdapterConfig
 from llama_stack.providers.remote.inference.sambanova import SambanovaImplConfig
+from llama_stack.providers.remote.inference.groq import GroqImplConfig
 from llama_stack.providers.tests.resolver import construct_stack_for_test

 from ..conftest import ProviderFixture, remote_stack_fixture
@@ -171,6 +172,21 @@ def inference_sambanova() -> ProviderFixture:
         ),
     )

+@pytest.fixture(scope="session")
+def inference_groq() -> ProviderFixture:
+    return ProviderFixture(
+        providers=[
+            Provider(
+                provider_id="groq",
+                provider_type="remote::groq",
+                config=GroqImplConfig().model_dump(),
+            )
+        ],
+        provider_data=dict(
+            groq_api_key=get_env_or_fail("GROQ_API_KEY"),
+        ),
+    )
+

 def get_model_short_name(model_name: str) -> str:
     """Convert model name to a short test identifier.
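The new fixture reads its credential through get_env_or_fail, a helper imported elsewhere in this file and not shown in the hunk. As a minimal sketch of what such a helper typically does (assumption: it simply wraps os.environ; the actual llama-stack implementation may differ):

import os

def get_env_or_fail(key: str) -> str:
    # Sketch only: fetch a required environment variable and fail
    # loudly when it is missing, so a misconfigured test run aborts
    # before any request is sent with an empty API key.
    value = os.environ.get(key)
    if not value:
        raise ValueError(f"Environment variable {key} is not set")
    return value

With a helper like that, the fixture above fails at setup time unless GROQ_API_KEY is exported in the environment running the tests.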
@@ -207,6 +223,7 @@ INFERENCE_FIXTURES = [
     "bedrock",
     "nvidia",
     "sambanova",
+    "groq",
 ]

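Adding "groq" to this list is what makes the new fixture selectable wherever the inference tests are parametrized over provider fixtures. The hook below is only an illustration of how such a registry is commonly consumed in a pytest conftest; the names, the abridged fixture list, and the exact mechanism used by llama-stack are assumptions:

# Illustrative sketch: parametrize any test that requests an
# "inference_stack" fixture over the registered provider names,
# so filtering (for example with pytest -k groq) picks the
# Groq-backed variant of each test.
INFERENCE_FIXTURES = ["ollama", "together", "bedrock", "nvidia", "sambanova", "groq"]

def pytest_generate_tests(metafunc):
    if "inference_stack" in metafunc.fixturenames:
        metafunc.parametrize("inference_stack", INFERENCE_FIXTURES, indirect=True)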