forked from phoenix-oss/llama-stack-mirror
# What does this PR do? This PR introduces more non-llama model support to llama stack. Providers introduced: openai, anthropic and gemini. All of these providers use essentially the same piece of code -- the implementation works via the `litellm` library. We will expose only specific models for providers we enable making sure they all work well and pass tests. This setup (instead of automatically enabling _all_ providers and models allowed by LiteLLM) ensures we can also perform any needed prompt tuning on a per-model basis as needed (just like we do it for llama models.) ## Test Plan ```bash #!/bin/bash args=("$@") for model in openai/gpt-4o anthropic/claude-3-5-sonnet-latest gemini/gemini-1.5-flash; do LLAMA_STACK_CONFIG=dev pytest -s -v tests/client-sdk/inference/test_text_inference.py \ --embedding-model=all-MiniLM-L6-v2 \ --vision-inference-model="" \ --inference-model=$model "${args[@]}" done ```
23 lines
577 B
Python
23 lines
577 B
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Optional

from pydantic import BaseModel

from .config import GeminiConfig

class GeminiProviderDataValidator(BaseModel):
    """Schema for per-request provider data accepted by the Gemini adapter.

    Attributes:
        gemini_api_key: API key for the Gemini service. Optional so a
            request may omit it — presumably the adapter then falls back
            to the key in its static ``GeminiConfig`` (confirm in
            ``GeminiInferenceAdapter``).
    """

    gemini_api_key: Optional[str] = None
async def get_adapter_impl(config: GeminiConfig, _deps):
    """Construct and initialize the Gemini inference adapter.

    Entry point used by the provider registry to instantiate this adapter.

    Args:
        config: Static configuration for the Gemini provider.
        _deps: Provider dependencies; unused by this adapter (leading
            underscore signals it is intentionally ignored).

    Returns:
        An initialized ``GeminiInferenceAdapter`` instance.
    """
    # Deferred import so the adapter module (and its dependencies) is only
    # loaded when this provider is actually instantiated.
    from .gemini import GeminiInferenceAdapter

    impl = GeminiInferenceAdapter(config)
    await impl.initialize()
    return impl