Mirror of https://github.com/meta-llama/llama-stack.git
add groq inference provider

commit 74a6aa2c81
parent 34be07e0df

6 changed files with 480 additions and 0 deletions
@@ -18,6 +18,7 @@ from llama_stack.providers.inline.inference.meta_reference import (
 from llama_stack.providers.remote.inference.bedrock import BedrockConfig
 from llama_stack.providers.remote.inference.fireworks import FireworksImplConfig
+from llama_stack.providers.remote.inference.groq import GroqImplConfig
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
 from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
 from llama_stack.providers.remote.inference.together import TogetherImplConfig
@@ -114,6 +115,21 @@ def inference_fireworks() -> ProviderFixture:
     )


+@pytest.fixture(scope="session")
+def inference_groq() -> ProviderFixture:
+    return ProviderFixture(
+        providers=[
+            Provider(
+                provider_id="groq",
+                provider_type="remote::groq",
+                config=GroqImplConfig(
+                    api_key=get_env_or_fail("GROQ_API_KEY"),
+                ).model_dump(),
+            )
+        ],
+    )
+
+
 @pytest.fixture(scope="session")
 def inference_together() -> ProviderFixture:
     return ProviderFixture(
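As context for the hunk above: the fixture builds the provider config from an environment variable and serializes it with .model_dump(), which suggests GroqImplConfig is a pydantic v2 model, and get_env_or_fail reads a required environment variable. A minimal self-contained sketch of that config-resolution step, using hypothetical *_sketch stand-ins rather than the repo's actual classes:

# Sketch only; GroqImplConfigSketch and get_env_or_fail_sketch are
# illustrative stand-ins, not llama-stack's real definitions.
import os

from pydantic import BaseModel


class GroqImplConfigSketch(BaseModel):
    # Holds the API key handed to the remote::groq provider.
    api_key: str


def get_env_or_fail_sketch(key: str) -> str:
    # Return the environment variable's value, or raise if it is unset.
    value = os.environ.get(key)
    if not value:
        raise RuntimeError(f"Expected environment variable {key} to be set")
    return value


if __name__ == "__main__":
    # Mirrors config=GroqImplConfig(api_key=...).model_dump() in the fixture.
    config = GroqImplConfigSketch(
        api_key=get_env_or_fail_sketch("GROQ_API_KEY"),
    ).model_dump()
    print(config)  # a plain dict, e.g. {'api_key': '...'}

Run with GROQ_API_KEY set, this prints the serialized config dict that the fixture passes to the Provider entry.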
@@ -190,6 +206,7 @@ INFERENCE_FIXTURES = [
     "remote",
     "bedrock",
     "nvidia",
+    "groq",
 ]
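Appending "groq" to INFERENCE_FIXTURES makes the new fixture selectable by name alongside the existing providers. A minimal sketch of how such a name list can be wired to the matching inference_<name> fixtures; pytest's request.getfixturevalue is a real API, but the conftest wiring below is an assumption for illustration, not the repo's actual test plumbing:

# Sketch, assuming fixtures named inference_remote, inference_bedrock,
# inference_nvidia, and inference_groq are defined elsewhere.
import pytest

INFERENCE_FIXTURES = ["remote", "bedrock", "nvidia", "groq"]


@pytest.fixture(params=INFERENCE_FIXTURES)
def inference_fixture(request):
    # request.param is e.g. "groq"; getfixturevalue resolves the
    # matching session-scoped fixture (inference_groq) by name.
    return request.getfixturevalue(f"inference_{request.param}")

With this shape, a test that takes inference_fixture runs once per provider name in the list, including the newly added groq entry.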