Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)
fix: fixing mypy linting errors and being backwards compatible for azure=true flag
This commit is contained in: parent 25d8f45817, commit 060a2e40b2
7 changed files with 17 additions and 7 deletions
Binary file not shown. (4 binary files changed; contents omitted by the diff view)
@@ -26,7 +26,7 @@ class AnthropicConfig():
     to pass metadata to anthropic, it's {"user_id": "any-relevant-information"}
     """
     max_tokens_to_sample: Optional[int]=256 # anthropic requires a default
-    stop_sequences: Optional[list[str]]=None
+    stop_sequences: Optional[list]=None
     temperature: Optional[int]=None
     top_p: Optional[int]=None
     top_k: Optional[int]=None
@@ -34,7 +34,7 @@ class AnthropicConfig():
 
     def __init__(self,
                  max_tokens_to_sample: Optional[int]=256, # anthropic requires a default
-                 stop_sequences: Optional[list[str]]=None,
+                 stop_sequences: Optional[list]=None,
                  temperature: Optional[int]=None,
                  top_p: Optional[int]=None,
                  top_k: Optional[int]=None,
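The mypy fix in these two hunks is the annotation change: subscripting the built-in list (list[str]) is only legal on Python 3.9+, and class-level annotations are evaluated at class-definition time, so the parameterized form breaks older interpreters and older mypy targets. A minimal sketch (illustrative class name, not the litellm source) of the committed spelling and the typed alternative:

from typing import List, Optional

class AnthropicConfigSketch:
    # On Python 3.8, `Optional[list[str]]` raises at import time
    # ("TypeError: 'type' object is not subscriptable") because class-level
    # annotations are evaluated eagerly; mypy targeting older versions
    # rejects it for the same reason.
    stop_sequences: Optional[list] = None           # as committed
    stop_sequences_alt: Optional[List[str]] = None  # typing-based alternative

Dropping the type parameter keeps the annotation valid on every supported Python version; Optional[List[str]] from typing would have been the other backwards-compatible spelling.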
@@ -223,7 +223,7 @@ def completion(
     fallbacks = kwargs.get('fallbacks', [])
     ######## end of unpacking kwargs ###########
     openai_params = ["functions", "function_call", "temperature", "temperature", "top_p", "n", "stream", "stop", "max_tokens", "presence_penalty", "frequency_penalty", "logit_bias", "user", "request_timeout"]
-    litellm_params = ["metadata", "acompletion", "caching", "return_async", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "metadata", "fallbacks"]
+    litellm_params = ["metadata", "acompletion", "caching", "return_async", "mock_response", "api_key", "api_version", "api_base", "force_timeout", "logger_fn", "verbose", "custom_llm_provider", "litellm_logging_obj", "litellm_call_id", "use_client", "id", "metadata", "fallbacks", "azure"]
     default_params = openai_params + litellm_params
     non_default_params = {k: v for k,v in kwargs.items() if k not in default_params} # model-specific params - pass them straight to the model/provider
     if mock_response:
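Registering "azure" in litellm_params is what keeps the legacy flag from leaking downstream: anything not found in default_params is treated as a model-specific parameter and forwarded verbatim to the provider. A stripped-down sketch of that filter (shortened lists, illustrative function name):

def split_non_default_params(kwargs):
    # abbreviated versions of the lists in the hunk above
    openai_params = ["temperature", "top_p", "max_tokens", "stream"]
    litellm_params = ["metadata", "api_key", "fallbacks", "azure"]
    default_params = openai_params + litellm_params
    # anything unrecognized is assumed to be a provider-specific param
    return {k: v for k, v in kwargs.items() if k not in default_params}

# before the fix, azure=True would have been forwarded to the provider:
print(split_non_default_params({"azure": True, "top_k": 40}))  # {'top_k': 40}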
@@ -239,6 +239,8 @@ def completion(
         ] # update the model to the actual value if an alias has been passed in
     model_response = ModelResponse()
 
+    if kwargs['azure'] == True: # don't remove flag check, to remain backwards compatible for repos like Codium
+        custom_llm_provider="azure"
     if deployment_id != None: # azure llms
         model=deployment_id
         custom_llm_provider="azure"
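The new check restores the pre-provider-prefix calling convention: completion(..., azure=True) now routes to the Azure provider even without the "azure/" model prefix or a deployment_id. Note that the committed line indexes kwargs['azure'] directly, which relies on the key always being present (for example via an upstream default); the self-contained sketch below uses .get instead, and resolve_provider is an illustrative name, not litellm API:

def resolve_provider(model, deployment_id=None, custom_llm_provider=None, **kwargs):
    if kwargs.get("azure") == True:  # legacy flag, kept for callers like Codium
        custom_llm_provider = "azure"
    if deployment_id is not None:  # azure llms route via the deployment name
        model = deployment_id
        custom_llm_provider = "azure"
    return model, custom_llm_provider

# an old-style call site keeps working without the "azure/" prefix:
print(resolve_provider("chatgpt-v-2", azure=True))  # ('chatgpt-v-2', 'azure')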
@@ -365,7 +365,7 @@ def test_completion_openai():
         litellm.api_key = None
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-test_completion_openai()
+# test_completion_openai()
 
 
 def test_completion_openai_prompt():
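Commenting the module-level call out matters because pytest collects test_* functions on its own; a bare call at import time re-runs the test (and hits the live API) whenever the module is imported. A common guard for keeping direct invocation available, shown here as a suggestion rather than what the repo uses:

if __name__ == "__main__":
    test_completion_openai()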
@@ -570,17 +570,25 @@ def test_completion_openai_with_more_optional_params():
 
 def test_completion_azure():
     try:
-        print("azure gpt-3.5 test\n\n")
+        litellm.set_verbose=True
+        ## Test azure call
         response = completion(
             model="azure/chatgpt-v-2",
             messages=messages,
+            azure=True
+        )
+        ## Test azure flag for backwards compatibility
+        response = completion(
+            model="chatgpt-v-2",
+            messages=messages,
+            azure=True
         )
         # Add any assertions here to check the response
         print(response)
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
-# test_completion_azure()
+test_completion_azure()
 
 # new azure test for using litellm. vars,
 # use the following vars in this test and make an azure_api_call
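The updated test exercises both Azure calling conventions that the main.py changes support. As a usage sketch (assuming Azure credentials are configured the way litellm expects, e.g. via environment variables, and an example user message of my own):

from litellm import completion

messages = [{"role": "user", "content": "Hey, how's it going?"}]

# current convention: provider prefix on the model name
response = completion(model="azure/chatgpt-v-2", messages=messages, azure=True)

# legacy convention: bare deployment name plus the azure=True flag,
# which this commit keeps working
response = completion(model="chatgpt-v-2", messages=messages, azure=True)
print(response)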