mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-17 20:09:47 +00:00)
portkey integration v2
This commit is contained in:
parent 7ece0d4d8b
commit 71f27f6676
6 changed files with 266 additions and 0 deletions
@@ -22,6 +22,7 @@ from llama_stack.providers.remote.inference.fireworks import FireworksImplConfig
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
 from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
 from llama_stack.providers.remote.inference.tgi import TGIImplConfig
+from llama_stack.providers.remote.inference.portkey import PortkeyImplConfig
 from llama_stack.providers.remote.inference.together import TogetherImplConfig
 from llama_stack.providers.remote.inference.vllm import VLLMInferenceAdapterConfig
 from llama_stack.providers.tests.resolver import construct_stack_for_test
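For orientation, here is a minimal sketch of what the newly imported PortkeyImplConfig plausibly looks like, modeled on the sibling remote-inference configs in this file (e.g. FireworksImplConfig). Only the api_key field is confirmed by the fixture added below; the url field and its default value are assumptions.

# Hypothetical sketch of PortkeyImplConfig; not the code from this commit.
# Only api_key is confirmed by the fixture below. The url field and its
# default are assumptions modeled on sibling remote-inference configs.
from typing import Optional

from pydantic import BaseModel, Field


class PortkeyImplConfig(BaseModel):
    api_key: Optional[str] = Field(
        default=None,
        description="The Portkey API key, typically taken from PORTKEY_API_KEY",
    )
    url: str = Field(
        default="https://api.portkey.ai/v1",
        description="Base URL of the Portkey gateway (assumed default)",
    )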
@@ -82,6 +83,21 @@ def inference_cerebras() -> ProviderFixture:
         ],
     )
 
 
+@pytest.fixture(scope="session")
+def inference_portkey() -> ProviderFixture:
+    return ProviderFixture(
+        providers=[
+            Provider(
+                provider_id="portkey",
+                provider_type="remote::portkey",
+                config=PortkeyImplConfig(
+                    api_key=get_env_or_fail("PORTKEY_API_KEY"),
+                ).model_dump(),
+            )
+        ],
+    )
+
+
 @pytest.fixture(scope="session")
 def inference_ollama(inference_model) -> ProviderFixture:
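A hedged sketch of how the new session-scoped fixture could be consumed; the test name, the skipif guard, and the assertions are illustrative additions, not part of this commit, and assume pytest collects this test alongside the fixture above.

# Illustrative only: a minimal test consuming the inference_portkey fixture.
# The asserted values mirror the fixture above; the skipif guard is an
# assumption so the test is skipped (and the fixture never built) when
# PORTKEY_API_KEY is unset.
import os

import pytest


@pytest.mark.skipif(
    "PORTKEY_API_KEY" not in os.environ,
    reason="PORTKEY_API_KEY is required for the Portkey fixture",
)
def test_portkey_provider_fixture(inference_portkey):
    provider = inference_portkey.providers[0]
    assert provider.provider_id == "portkey"
    assert provider.provider_type == "remote::portkey"
    assert provider.config["api_key"] == os.environ["PORTKEY_API_KEY"]

With the fixture in place, a run along the lines of PORTKEY_API_KEY=... pytest -k portkey over the inference test directory would exercise the new provider, assuming the suite's usual fixture wiring.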