Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 19:54:13 +00:00
(feat) access metadata in embedding kwargs()
This commit is contained in:
parent
509eb4a228
commit
0e3f7ea28f
2 changed files with 8 additions and 2 deletions
@@ -1783,6 +1783,7 @@ def embedding(
     rpm = kwargs.pop("rpm", None)
     tpm = kwargs.pop("tpm", None)
     model_info = kwargs.get("model_info", None)
+    metadata = kwargs.get("metadata", None)
     proxy_server_request = kwargs.get("proxy_server_request", None)
     aembedding = kwargs.pop("aembedding", None)
     openai_params = ["functions", "function_call", "temperature", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "request_timeout", "api_base", "api_version", "api_key", "deployment_id", "organization", "base_url", "default_headers", "timeout", "response_format", "seed", "tools", "tool_choice", "max_retries", "encoding_format"]
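
For context, a minimal caller-side sketch of how metadata can end up in the kwargs read above; the model name and metadata values are illustrative and not taken from this commit.

import litellm

# metadata passed as a plain keyword argument is what kwargs.get("metadata", None) picks up
response = litellm.embedding(
    model="text-embedding-ada-002",  # illustrative model; a provider API key must be configured
    input=["hello world"],
    metadata={"user_id": "abc-123", "source": "docs-search"},
)
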
@@ -1798,7 +1799,7 @@ def embedding(
     try:
         response = None
         logging = litellm_logging_obj
-        logging.update_environment_variables(model=model, user="", optional_params=optional_params, litellm_params={"timeout": timeout, "azure": azure, "litellm_call_id": litellm_call_id, "logger_fn": logger_fn, "proxy_server_request": proxy_server_request, "model_info": model_info})
+        logging.update_environment_variables(model=model, user="", optional_params=optional_params, litellm_params={"timeout": timeout, "azure": azure, "litellm_call_id": litellm_call_id, "logger_fn": logger_fn, "proxy_server_request": proxy_server_request, "model_info": model_info, "metadata": metadata})
         if azure == True or custom_llm_provider == "azure":
             # azure configs
             api_type = get_secret("AZURE_API_TYPE") or "azure"
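
With metadata now forwarded through litellm_params, a callback can read it back. A minimal sketch, assuming litellm's CustomLogger interface; the class name and printed fields are illustrative, not part of this diff.

from litellm.integrations.custom_logger import CustomLogger
import litellm

class MetadataLogger(CustomLogger):
    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        # litellm_params is populated by the update_environment_variables call above
        litellm_params = kwargs.get("litellm_params") or {}
        metadata = litellm_params.get("metadata", None)
        print("metadata seen by callback:", metadata)

litellm.callbacks = [MetadataLogger()]  # register the callback
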
@@ -73,8 +73,10 @@ def test_chat_completion(client):
     assert my_custom_logger.async_success == True # checks if the status of async_success is True, only the async_log_success_event can set this to true
     assert my_custom_logger.async_completion_kwargs["model"] == "chatgpt-v-2" # checks if kwargs passed to async_log_success_event are correct
     print("\n\n Custom Logger Async Completion args", my_custom_logger.async_completion_kwargs)

     litellm_params = my_custom_logger.async_completion_kwargs.get("litellm_params")
     metadata = litellm_params.get("metadata", None)
     print("\n\n Metadata in custom logger kwargs", litellm_params.get("metadata"))
     assert metadata is not None
     config_model_info = litellm_params.get("model_info")
     proxy_server_request_object = litellm_params.get("proxy_server_request")
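
A hypothetical sketch of the kind of request these assertions run against; the route, payload, and API key are assumptions mirroring the embedding test below and are not shown in this diff.

response = client.post(
    "/chat/completions",
    json={"model": "chatgpt-v-2", "messages": [{"role": "user", "content": "hi"}]},
    headers={"Authorization": "Bearer sk-1234"},
)
assert response.status_code == 200  # the custom logger captures kwargs once the call succeeds
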
@@ -174,6 +176,9 @@ def test_embedding(client):

     kwargs = my_custom_logger.async_embedding_kwargs
     litellm_params = kwargs.get("litellm_params")
+    metadata = litellm_params.get("metadata", None)
+    print("\n\n Metadata in custom logger kwargs", litellm_params.get("metadata"))
+    assert metadata is not None
     proxy_server_request = litellm_params.get("proxy_server_request")
     model_info = litellm_params.get("model_info")
     assert proxy_server_request == {'url': 'http://testserver/embeddings', 'method': 'POST', 'headers': {'host': 'testserver', 'accept': '*/*', 'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive', 'user-agent': 'testclient', 'authorization': 'Bearer sk-1234', 'content-length': '54', 'content-type': 'application/json'}, 'body': {'model': 'azure-embedding-model', 'input': ['hello']}}
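
The proxy_server_request assertion above pins down the originating call; a sketch of that request, reconstructed from the expected dict (client is assumed to be the FastAPI TestClient used by these proxy tests).

response = client.post(
    "/embeddings",
    json={"model": "azure-embedding-model", "input": ["hello"]},
    headers={"Authorization": "Bearer sk-1234"},
)
assert response.status_code == 200  # metadata then shows up in my_custom_logger.async_embedding_kwargs
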