Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-28 04:31:59 +00:00)
Sambanova now using LiteLLM openai-compat, models and template updated
commit 397eed9630
parent a9c5d3cd3d
11 changed files with 138 additions and 365 deletions
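For context on the commit title: "LiteLLM openai-compat" means the SambaNova provider now routes inference through LiteLLM's OpenAI-compatible interface rather than a bespoke client. A minimal sketch of that call pattern, assuming LiteLLM's "sambanova/" model prefix and an API key in the environment; the model id is illustrative and not taken from this commit:

import litellm

# Hedged sketch: LiteLLM exposes OpenAI-compatible providers through a
# single completion() call; the "sambanova/..." prefix selects its
# SambaNova backend. Assumes SAMBANOVA_API_KEY is set in the environment.
response = litellm.completion(
    model="sambanova/Meta-Llama-3.1-8B-Instruct",  # illustrative model id
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.choices[0].message.content)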
@@ -21,7 +21,13 @@ def skip_if_model_doesnt_support_completion(client_with_models, model_id):
     provider_id = models[model_id].provider_id
     providers = {p.provider_id: p for p in client_with_models.providers.list()}
     provider = providers[provider_id]
-    if provider.provider_type in ("remote::openai", "remote::anthropic", "remote::gemini", "remote::groq"):
+    if provider.provider_type in (
+        "remote::openai",
+        "remote::anthropic",
+        "remote::gemini",
+        "remote::groq",
+        "remote::sambanova",
+    ):
         pytest.skip(f"Model {model_id} hosted by {provider.provider_type} doesn't support completion")
 
 
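With "remote::sambanova" added to the tuple, completion tests are now skipped for SambaNova-hosted models as well, since its OpenAI-compat endpoint serves chat but not legacy completions. A hedged sketch of how a test might invoke the helper above; the test and fixture names are illustrative assumptions, not taken from this commit:

def test_completion(client_with_models, text_model_id):
    # Bail out before issuing any request if the hosting provider
    # (now including remote::sambanova) has no completion endpoint.
    skip_if_model_doesnt_support_completion(client_with_models, text_model_id)
    # ... exercise the completion API here ...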