Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-23 05:09:41 +00:00
Some of our inference providers support passthrough authentication via `x-llamastack-provider-data` header values. This change fixes the providers that support passthrough auth so they no longer cache their clients to the backend providers (mostly OpenAI client instances); as a result, the client connecting to Llama Stack must provide those auth values on each and every request.

Signed-off-by: Ben Browning <bbrownin@redhat.com>
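For context, this is roughly what the passthrough header looks like from the caller's side. A minimal sketch, assuming a plain dict of request headers; the `groq_api_key` field name is an illustrative assumption (each provider defines its own field, e.g. `together_api_key` in the tests below):

import json

# Build the per-request passthrough header a client would send to Llama Stack.
# The header name is the one used in the tests below; the key field shown is
# an illustrative assumption, not a confirmed field name.
provider_data = {"groq_api_key": "gsk-example-key"}
headers = {"x-llamastack-provider-data": json.dumps(provider_data)}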
73 lines
3.1 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import json
from unittest.mock import MagicMock

from llama_stack.distribution.request_headers import request_provider_data_context
from llama_stack.providers.remote.inference.groq.config import GroqConfig
from llama_stack.providers.remote.inference.groq.groq import GroqInferenceAdapter
from llama_stack.providers.remote.inference.openai.config import OpenAIConfig
from llama_stack.providers.remote.inference.openai.openai import OpenAIInferenceAdapter
from llama_stack.providers.remote.inference.together.config import TogetherImplConfig
from llama_stack.providers.remote.inference.together.together import TogetherInferenceAdapter


def test_groq_provider_openai_client_caching():
    """Ensure the Groq provider does not cache api keys across client requests"""

    config = GroqConfig()
    inference_adapter = GroqInferenceAdapter(config)

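    # __provider_spec__ is normally injected by the stack at runtime; mock it
    # so the adapter can resolve its provider-data validator in a unit test.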
    inference_adapter.__provider_spec__ = MagicMock()
    inference_adapter.__provider_spec__.provider_data_validator = (
        "llama_stack.providers.remote.inference.groq.config.GroqProviderDataValidator"
    )

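    # Send two different keys through the provider-data context; each must be
    # reflected in the returned client, which a cached client would fail to do.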
for api_key in ["test1", "test2"]:
|
|
with request_provider_data_context(
|
|
{"x-llamastack-provider-data": json.dumps({inference_adapter.provider_data_api_key_field: api_key})}
|
|
):
|
|
openai_client = inference_adapter._get_openai_client()
|
|
assert openai_client.api_key == api_key
|
|
|
|
|
|
def test_openai_provider_openai_client_caching():
    """Ensure the OpenAI provider does not cache api keys across client requests"""

    config = OpenAIConfig()
    inference_adapter = OpenAIInferenceAdapter(config)

    inference_adapter.__provider_spec__ = MagicMock()
    inference_adapter.__provider_spec__.provider_data_validator = (
        "llama_stack.providers.remote.inference.openai.config.OpenAIProviderDataValidator"
    )

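    # Same pattern as the Groq test above: alternating keys must each show up
    # in a freshly constructed client.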
for api_key in ["test1", "test2"]:
|
|
with request_provider_data_context(
|
|
{"x-llamastack-provider-data": json.dumps({inference_adapter.provider_data_api_key_field: api_key})}
|
|
):
|
|
openai_client = inference_adapter._get_openai_client()
|
|
assert openai_client.api_key == api_key
|
|
|
|
|
|
def test_together_provider_openai_client_caching():
    """Ensure the Together provider does not cache api keys across client requests"""

    config = TogetherImplConfig()
    inference_adapter = TogetherInferenceAdapter(config)

    inference_adapter.__provider_spec__ = MagicMock()
    inference_adapter.__provider_spec__.provider_data_validator = (
        "llama_stack.providers.remote.inference.together.TogetherProviderDataValidator"
    )

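    # Together exposes both its native client and an OpenAI-compatible client;
    # both must pick up the per-request key rather than a cached one.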
for api_key in ["test1", "test2"]:
|
|
with request_provider_data_context({"x-llamastack-provider-data": json.dumps({"together_api_key": api_key})}):
|
|
together_client = inference_adapter._get_client()
|
|
assert together_client.client.api_key == api_key
|
|
openai_client = inference_adapter._get_openai_client()
|
|
assert openai_client.api_key == api_key
|