Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-25 21:57:45 +00:00)
# What does this PR do?

Add an `OpenAIMixin` for use by inference providers whose remote endpoints support an OpenAI-compatible API. Its use is demonstrated by refactoring:

- OpenAIInferenceAdapter
- NVIDIAInferenceAdapter (adds embedding support)
- LlamaCompatInferenceAdapter

## Test Plan

Existing unit and integration tests.
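For orientation, an adapter built on such a mixin might look roughly like the sketch below. This is a hypothetical illustration, not the actual `OpenAIMixin` API from this PR: the method names (`get_api_key`, `get_base_url`) and the uncached `client` property are assumptions, modeled on common OpenAI-compatible adapter patterns and on the no-caching behavior the tests below verify.

```python
# Hypothetical sketch only: not the actual OpenAIMixin from this PR.
# The method names and the uncached `client` property are assumptions.
from abc import ABC, abstractmethod

from openai import AsyncOpenAI


class OpenAICompatMixinSketch(ABC):
    """Shared plumbing for providers whose remote endpoint speaks the OpenAI API."""

    @abstractmethod
    def get_api_key(self) -> str:
        """Return the API key for the current request (e.g. from provider data)."""

    @abstractmethod
    def get_base_url(self) -> str:
        """Return the provider's OpenAI-compatible endpoint URL."""

    @property
    def client(self) -> AsyncOpenAI:
        # Construct the client on each access instead of caching it, so a
        # per-request api key (see the tests below) is always picked up.
        return AsyncOpenAI(api_key=self.get_api_key(), base_url=self.get_base_url())
```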
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import json
from unittest.mock import MagicMock

from llama_stack.distribution.request_headers import request_provider_data_context
from llama_stack.providers.remote.inference.groq.config import GroqConfig
from llama_stack.providers.remote.inference.groq.groq import GroqInferenceAdapter
from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
from llama_stack.providers.remote.inference.llama_openai_compat.llama import LlamaCompatInferenceAdapter
from llama_stack.providers.remote.inference.openai.config import OpenAIConfig
from llama_stack.providers.remote.inference.openai.openai import OpenAIInferenceAdapter
from llama_stack.providers.remote.inference.together.config import TogetherImplConfig
from llama_stack.providers.remote.inference.together.together import TogetherInferenceAdapter


def test_groq_provider_openai_client_caching():
    """Ensure the Groq provider does not cache api keys across client requests"""

    config = GroqConfig()
    inference_adapter = GroqInferenceAdapter(config)

    inference_adapter.__provider_spec__ = MagicMock()
    inference_adapter.__provider_spec__.provider_data_validator = (
        "llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator"
    )

    for api_key in ["test1", "test2"]:
        with request_provider_data_context(
            {"x-llamastack-provider-data": json.dumps({inference_adapter.provider_data_api_key_field: api_key})}
        ):
            openai_client = inference_adapter._get_openai_client()
            assert openai_client.api_key == api_key


def test_openai_provider_openai_client_caching():
    """Ensure the OpenAI provider does not cache api keys across client requests"""

    config = OpenAIConfig()
    inference_adapter = OpenAIInferenceAdapter(config)

    inference_adapter.__provider_spec__ = MagicMock()
    inference_adapter.__provider_spec__.provider_data_validator = (
        "llama_stack.providers.remote.inference.openai.config.OpenAIProviderDataValidator"
    )

    for api_key in ["test1", "test2"]:
        with request_provider_data_context(
            {"x-llamastack-provider-data": json.dumps({inference_adapter.provider_data_api_key_field: api_key})}
        ):
            openai_client = inference_adapter.client
            assert openai_client.api_key == api_key


def test_together_provider_openai_client_caching():
    """Ensure the Together provider does not cache api keys across client requests"""

    config = TogetherImplConfig()
    inference_adapter = TogetherInferenceAdapter(config)

    inference_adapter.__provider_spec__ = MagicMock()
    inference_adapter.__provider_spec__.provider_data_validator = (
        "llama_stack.providers.remote.inference.together.TogetherProviderDataValidator"
    )

    for api_key in ["test1", "test2"]:
        with request_provider_data_context({"x-llamastack-provider-data": json.dumps({"together_api_key": api_key})}):
            together_client = inference_adapter._get_client()
            assert together_client.client.api_key == api_key
            openai_client = inference_adapter._get_openai_client()
            assert openai_client.api_key == api_key


def test_llama_compat_provider_openai_client_caching():
    """Ensure the LlamaCompat provider does not cache api keys across client requests"""

    config = LlamaCompatConfig()
    inference_adapter = LlamaCompatInferenceAdapter(config)

    inference_adapter.__provider_spec__ = MagicMock()
    inference_adapter.__provider_spec__.provider_data_validator = (
        "llama_stack.providers.remote.inference.llama_openai_compat.config.LlamaProviderDataValidator"
    )

    for api_key in ["test1", "test2"]:
        with request_provider_data_context({"x-llamastack-provider-data": json.dumps({"llama_api_key": api_key})}):
            assert inference_adapter.client.api_key == api_key
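For reference, the `x-llamastack-provider-data` header these tests exercise is plain JSON carried in an HTTP header. A caller could supply a per-request key along the following lines. This is a hedged sketch: the host, port, route, and request body are illustrative assumptions; only the header name and the `together_api_key` field come from the tests above.

```python
import json

import httpx  # any HTTP client works; httpx is an arbitrary choice here

headers = {
    # Same header the tests above exercise: JSON-encoded provider data.
    # "together_api_key" is the field name the Together test uses.
    "x-llamastack-provider-data": json.dumps({"together_api_key": "tok-..."}),
}

# Illustrative request; the host, port, route, and body are assumptions
# and depend on how the Llama Stack server is deployed.
resp = httpx.post(
    "http://localhost:8321/v1/inference/chat-completion",
    headers=headers,
    json={"model_id": "meta-llama/Llama-3.1-8B-Instruct", "messages": [{"role": "user", "content": "hi"}]},
)
print(resp.status_code)
```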