Mirror of https://github.com/meta-llama/llama-stack.git — synced 2025-06-28 19:04:19 +00:00
# What does this PR do? - braintrust scoring provider requires OPENAI_API_KEY env variable to be set - move this to be able to be set as request headers (e.g. like together / fireworks api keys) - fixes pytest with agents dependency ## Test Plan **E2E** ``` llama stack run ``` ```yaml scoring: - provider_id: braintrust-0 provider_type: inline::braintrust config: {} ``` **Client** ```python self.client = LlamaStackClient( base_url=os.environ.get("LLAMA_STACK_ENDPOINT", "http://localhost:5000"), provider_data={ "openai_api_key": os.environ.get("OPENAI_API_KEY", ""), }, ) ``` - run `llama-stack-client eval run_scoring` **Unit Test** ``` pytest -v -s -m meta_reference_eval_together_inference eval/test_eval.py ``` ``` pytest -v -s -m braintrust_scoring_together_inference scoring/test_scoring.py --env OPENAI_API_KEY=$OPENAI_API_KEY ``` <img width="745" alt="image" src="https://github.com/user-attachments/assets/68f5cdda-f6c8-496d-8b4f-1b3dabeca9c2"> ## Before submitting - [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case). - [ ] Ran pre-commit to handle lint / formatting issues. - [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section? - [ ] Updated relevant documentation. - [ ] Wrote necessary unit or integration tests.
60 lines · 1.6 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
# All rights reserved.
|
|
#
|
|
# This source code is licensed under the terms described in the LICENSE file in
|
|
# the root directory of this source tree.
|
|
|
|
import json
|
|
import logging
|
|
import threading
|
|
from typing import Any, Dict
|
|
|
|
from .utils.dynamic import instantiate_class_type
|
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
_THREAD_LOCAL = threading.local()
|
|
|
|
|
|
class NeedsRequestProviderData:
|
|
def get_request_provider_data(self) -> Any:
|
|
spec = self.__provider_spec__
|
|
assert spec, f"Provider spec not set on {self.__class__}"
|
|
|
|
provider_type = spec.provider_type
|
|
validator_class = spec.provider_data_validator
|
|
if not validator_class:
|
|
raise ValueError(f"Provider {provider_type} does not have a validator")
|
|
|
|
val = getattr(_THREAD_LOCAL, "provider_data_header_value", None)
|
|
if not val:
|
|
return None
|
|
|
|
validator = instantiate_class_type(validator_class)
|
|
try:
|
|
provider_data = validator(**val)
|
|
return provider_data
|
|
except Exception as e:
|
|
log.error(f"Error parsing provider data: {e}")
|
|
|
|
|
|
def set_request_provider_data(headers: Dict[str, str]):
|
|
keys = [
|
|
"X-LlamaStack-ProviderData",
|
|
"x-llamastack-providerdata",
|
|
]
|
|
for key in keys:
|
|
val = headers.get(key, None)
|
|
if val:
|
|
break
|
|
|
|
if not val:
|
|
return
|
|
|
|
try:
|
|
val = json.loads(val)
|
|
except json.JSONDecodeError:
|
|
log.error("Provider data not encoded as a JSON object!", val)
|
|
return
|
|
|
|
_THREAD_LOCAL.provider_data_header_value = val
|