mirror of https://github.com/BerriAI/litellm.git
fix(router.py): enable calling router with instructor
parent 9d8f872f38
commit 1d635aff08

3 changed files with 17 additions and 6 deletions

@@ -55,6 +55,8 @@ class Router:
         if cache_responses:
             litellm.cache = litellm.Cache(**cache_config) # use Redis for caching completion requests
             self.cache_responses = cache_responses
 
+        self.chat = litellm.Chat(params={})
+
 
     def _start_health_check_thread(self):
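
The heart of the fix is the added `self.chat` attribute: instructor patches an OpenAI-style client by wrapping `client.chat.completions.create`, and the Router previously exposed no `chat` attribute to wrap. A minimal sketch of the surface this adds, using a made-up single OpenAI route rather than the repo's Azure config:

```python
# Minimal sketch (not from the repo) of the attribute this commit adds.
# instructor.patch() wraps client.chat.completions.create, so the Router
# must look like an OpenAI client at least to that depth.
from litellm import Router

router = Router(model_list=[{
    "model_name": "gpt-3.5-turbo",                 # alias callers route by
    "litellm_params": {"model": "gpt-3.5-turbo"},  # hypothetical OpenAI-backed deployment
}])

assert hasattr(router, "chat")              # set in __init__ via litellm.Chat(params={})
assert hasattr(router.chat, "completions")  # the handle instructor expects to patch
```
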

@@ -9,13 +9,21 @@
 # ) # Adds the parent directory to the system path
 # import litellm
 # litellm.set_verbose = True
-# from litellm import LiteLLM
+# from litellm import Router
 # import instructor
 # from pydantic import BaseModel
 
 # # This enables response_model keyword
 # # from client.chat.completions.create
-# client = instructor.patch(LiteLLM())
+# client = instructor.patch(Router(model_list=[{
+#     "model_name": "gpt-3.5-turbo", # openai model name
+#     "litellm_params": { # params for litellm completion/embedding call
+#         "model": "azure/chatgpt-v-2",
+#         "api_key": os.getenv("AZURE_API_KEY"),
+#         "api_version": os.getenv("AZURE_API_VERSION"),
+#         "api_base": os.getenv("AZURE_API_BASE")
+#     }
+# }]))
 
 # class UserDetail(BaseModel):
 #     name: str

@@ -31,4 +39,6 @@
 # assert isinstance(user, UserDetail)
 # assert user.name == "Jason"
 # assert user.age == 25
 
+
+# print(f"user: {user}")
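
Pieced together, the commented-out test sketches the instructor workflow the commit enables: patch the Router, then pass `response_model` so the completion is parsed and validated into a pydantic object. The `create()` call itself falls between the two hunks and is not shown, so the call shape and prompt below are assumptions based on standard instructor usage; the Azure environment variables mirror the ones in the diff.

```python
# Hedged, self-contained sketch of the flow the commented-out test implies.
# The create() call is not visible in the diff, so its prompt and shape are
# assumptions; the Azure settings come from the env vars used in the test.
import os
import instructor
from pydantic import BaseModel
from litellm import Router

class UserDetail(BaseModel):
    name: str
    age: int  # the test asserts user.age == 25, so an age field is implied

client = instructor.patch(Router(model_list=[{
    "model_name": "gpt-3.5-turbo",
    "litellm_params": {
        "model": "azure/chatgpt-v-2",
        "api_key": os.getenv("AZURE_API_KEY"),
        "api_version": os.getenv("AZURE_API_VERSION"),
        "api_base": os.getenv("AZURE_API_BASE"),
    },
}]))

# response_model makes instructor parse and validate the completion into
# a UserDetail instance instead of returning the raw response.
user = client.chat.completions.create(
    model="gpt-3.5-turbo",
    response_model=UserDetail,
    messages=[{"role": "user", "content": "Extract: Jason is 25 years old"}],
)

assert isinstance(user, UserDetail)
assert user.name == "Jason"
assert user.age == 25
print(f"user: {user}")
```
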

@@ -45,9 +45,8 @@ def test_completion_custom_provider_model_name():
 def test_completion_with_num_retries():
     try:
         response = completion(model="j2-ultra", messages=[{"messages": "vibe", "bad": "message"}], num_retries=2)
-    except openai.APIError as e:
-        pass
-    except Exception as e:
         pytest.fail(f"Unmapped exception occurred")
+    except Exception as e:
+        pass
 
 # test_completion_with_num_retries()
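
The test rework is behavioral rather than cosmetic: `pytest.fail` now sits inside the `try` block, so the test fails if the deliberately malformed call does not raise at all, and any mapped exception is accepted instead of only `openai.APIError`. For reference, ordinary `num_retries` usage looks roughly like the sketch below (a made-up example, not from the diff).

```python
# Hedged sketch (not from the repo) of ordinary num_retries usage: litellm
# retries the call on failure up to num_retries times before the exception
# reaches the caller. The model name is just an example.
import litellm

try:
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
        num_retries=2,  # retry up to 2 times before giving up
    )
    print(response.choices[0].message.content)
except Exception as err:
    # once retries are exhausted, litellm surfaces a mapped exception
    print(f"completion failed after retries: {err}")
```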