Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-12 13:00:39 +00:00.
# What does this PR do?

- Adds a new Vertex AI remote inference provider with litellm integration.
- Supports Gemini models through the Google Cloud Vertex AI platform.
- Uses Google Cloud Application Default Credentials (ADC) for authentication.
- Adds the Vertex AI models gemini-2.5-flash, gemini-2.5-pro, and gemini-2.0-flash.
- Updates the provider registry to include the vertexai provider.
- Updates the starter template to support Vertex AI configuration.
- Adds documentation and a sample configuration.

Relates to https://github.com/meta-llama/llama-stack/issues/2747

## Test Plan

Signed-off-by: Eran Cohen <eranco@redhat.com>
Co-authored-by: Francisco Arceo <arceofrancisco@gmail.com>
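For context, here is a minimal sketch of constructing the new adapter directly in Python. The `project` and `location` fields mirror what the adapter reads from its config (`self.config.project` / `self.config.location` in the file below); the absolute import paths are assumptions based on the usual remote-provider layout and are not verified against this PR.

```python
# Hypothetical usage sketch; import paths assumed from the typical
# llama-stack remote-provider layout, not confirmed by this PR.
from llama_stack.providers.remote.inference.vertexai.config import VertexAIConfig
from llama_stack.providers.remote.inference.vertexai.vertexai import (
    VertexAIInferenceAdapter,
)

# "project" and "location" match the fields the adapter falls back to when
# no per-request provider data is supplied; the values are placeholders.
config = VertexAIConfig(project="my-gcp-project", location="us-central1")
adapter = VertexAIInferenceAdapter(config)
```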
52 lines · 1.9 KiB · Python
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any

from llama_stack.apis.inference import ChatCompletionRequest
from llama_stack.providers.utils.inference.litellm_openai_mixin import (
    LiteLLMOpenAIMixin,
)

from .config import VertexAIConfig
from .models import MODEL_ENTRIES


class VertexAIInferenceAdapter(LiteLLMOpenAIMixin):
    def __init__(self, config: VertexAIConfig) -> None:
        LiteLLMOpenAIMixin.__init__(
            self,
            MODEL_ENTRIES,
            litellm_provider_name="vertex_ai",
            api_key_from_config=None,  # Vertex AI uses ADC, not API keys
            provider_data_api_key_field="vertex_project",  # Use project for validation
        )
        self.config = config

    def get_api_key(self) -> str:
        # Vertex AI doesn't use API keys, it uses Application Default Credentials
        # Return empty string to let litellm handle authentication via ADC
        return ""

    async def _get_params(self, request: ChatCompletionRequest) -> dict[str, Any]:
        # Get base parameters from parent
        params = await super()._get_params(request)

        # Add Vertex AI specific parameters
        provider_data = self.get_request_provider_data()
        if provider_data:
            if getattr(provider_data, "vertex_project", None):
                params["vertex_project"] = provider_data.vertex_project
            if getattr(provider_data, "vertex_location", None):
                params["vertex_location"] = provider_data.vertex_location
        else:
            params["vertex_project"] = self.config.project
            params["vertex_location"] = self.config.location

        # Remove api_key since Vertex AI uses ADC
        params.pop("api_key", None)

        return params
```
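Because `get_api_key()` returns an empty string, authentication succeeds only if Application Default Credentials are discoverable in the environment at request time. A minimal sketch of a pre-flight check using the `google-auth` package (not part of this PR; shown only to illustrate the credential lookup that litellm's ADC path depends on):

```python
# Sketch: verify ADC is available before starting the stack.
# google.auth.default() searches the standard ADC locations
# (GOOGLE_APPLICATION_CREDENTIALS, gcloud user credentials, attached
# service accounts) and raises DefaultCredentialsError if none is found.
import google.auth
from google.auth.exceptions import DefaultCredentialsError

try:
    credentials, detected_project = google.auth.default()
    print(f"ADC found; default project: {detected_project}")
except DefaultCredentialsError:
    print("No ADC found; run `gcloud auth application-default login` first")
```

Note the precedence in `_get_params`: per-request provider data (`vertex_project` / `vertex_location`) wins, and the static config values are used only when no provider data is supplied, which lets a single deployment route different requests to different GCP projects or regions.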