fix(utils): remove ui to view error message
This commit is contained in:
parent f9f2dcc7ea
commit e1ee2890b9

2 changed files with 11 additions and 26 deletions
@@ -137,7 +137,7 @@ $ litellm --model command-nightly

 [**Jump to Code**](https://github.com/BerriAI/litellm/blob/fef4146396d5d87006259e00095a62e3900d6bb4/litellm/proxy.py#L36)

-## [Tutorial]: Use with Aider/AutoGen/Continue-Dev/Langroid
+## [Tutorial]: Use with Aider/AutoGen/Continue-Dev/Langroid/etc.

 Here's how to use the proxy to test codellama/mistral/etc. models for different github repos

@@ -242,6 +242,16 @@ task.run()

 Credits [@pchalasani](https://github.com/pchalasani) and [Langroid](https://github.com/langroid/langroid) for this tutorial.
 </TabItem>
+<TabItem value="gpt-pilot" label="GPT-Pilot">
+GPT-Pilot helps you build apps with AI Agents. [For more](https://github.com/Pythagora-io/gpt-pilot)
+
+In your .env set the openai endpoint to your local server.
+
+```
+OPENAI_ENDPOINT=http://0.0.0.0:8000
+OPENAI_API_KEY=my-fake-key
+```
+</TabItem>
 </Tabs>

 :::note
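The new GPT-Pilot tab only sets two environment variables, so the proxy does the real work. To sanity-check that the endpoint in the `.env` is reachable, you can call it directly; a minimal sketch, assuming the pre-1.0 `openai` Python SDK (current at the time of this commit) and a proxy already started with `litellm --model command-nightly`:

```
# Point the pre-1.0 openai SDK at the local litellm proxy instead of api.openai.com.
import openai

openai.api_base = "http://0.0.0.0:8000"  # same value as OPENAI_ENDPOINT in the .env above
openai.api_key = "my-fake-key"           # the proxy does not validate this key

# The proxy forwards requests to whatever model it was started with,
# so the model name here is effectively a passthrough placeholder.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello from the litellm proxy"}],
)
print(response["choices"][0]["message"]["content"])
```

If this round-trips, GPT-Pilot (which reads `OPENAI_ENDPOINT` and `OPENAI_API_KEY` from its `.env`) should reach the same server.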
@@ -2311,31 +2311,6 @@ def exception_type(
     print("\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m")
     print("LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True`.")
     print()
-    if litellm.set_verbose == True:
-        litellm.error_logs['EXCEPTION'] = original_exception
-        litellm.error_logs['KWARGS'] = completion_kwargs
-        try:
-            # code to show users their litellm error dashboard
-            import urllib.parse
-            import json
-            for log_key in litellm.error_logs:
-                current_logs = litellm.error_logs[log_key]
-                if type(current_logs) == dict:
-                    filtered_error_logs = {key: str(value) for key, value in current_logs.items()}
-                    litellm.error_logs[log_key] = filtered_error_logs
-                else:
-                    litellm.error_logs[log_key] = str(current_logs)
-
-            # Convert the filtered error logs dictionary to a JSON string
-            error_logs_json = json.dumps(litellm.error_logs)
-            # URL-encode the JSON data
-            encoded_data = urllib.parse.quote(error_logs_json)
-
-            print("👉 view error logs:")
-            print("\033[91m" + '\033[4m' + 'https://logs.litellm.ai/?data=' + str(encoded_data) + "\033[0m")
-
-        except:
-            pass
     try:
         if isinstance(original_exception, OriginalError):
            # Handle the OpenAIError
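For context, the deleted block implemented a simple shareable-error-dashboard pattern: stringify every collected log value, serialize the dict to JSON, percent-encode it, and append it to a `logs.litellm.ai` URL. A standalone sketch of that pattern, using only the standard library (the log contents here are illustrative, not litellm's actual fields):

```
# Sketch of the removed "view error logs" flow: flatten values to strings,
# JSON-serialize, URL-encode, and embed the payload in a query string.
import json
import urllib.parse

error_logs = {
    "EXCEPTION": ValueError("model not found"),                 # illustrative
    "KWARGS": {"model": "command-nightly", "max_tokens": 256},  # illustrative
}

# Exceptions and nested objects are not JSON-serializable, so stringify them first
for log_key, current_logs in error_logs.items():
    if isinstance(current_logs, dict):
        error_logs[log_key] = {key: str(value) for key, value in current_logs.items()}
    else:
        error_logs[log_key] = str(current_logs)

encoded_data = urllib.parse.quote(json.dumps(error_logs))
print("https://logs.litellm.ai/?data=" + encoded_data)
```

One plausible motivation for the removal is that `completion_kwargs` can carry prompt contents, making a URL to an external logs site a privacy concern; the commit title itself only says the UI link was removed.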