Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
fix(proxy_server.py): return original model response via response headers - /v1/completions
to help devs with debugging
parent f17dd68df3
commit 15e0099948
4 changed files with 44 additions and 8 deletions
@@ -1153,7 +1153,9 @@ class OpenAITextCompletion(BaseLLM):
                 },
             )
             ## RESPONSE OBJECT
-            return TextCompletionResponse(**response_json)
+            response_obj = TextCompletionResponse(**response_json)
+            response_obj._hidden_params.original_response = json.dumps(response_json)
+            return response_obj
         except Exception as e:
             raise e

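The diff itself only stashes the raw provider payload on the response object's _hidden_params; per the commit message, the proxy then surfaces it to callers as a response header on /v1/completions. A minimal sketch of how that hand-off could look, assuming a helper named build_debug_headers and a header called x-litellm-original-response (both hypothetical, not the actual proxy_server.py code):

def build_debug_headers(response_obj) -> dict:
    """Collect extra HTTP response headers carrying the raw provider response."""
    headers: dict = {}
    # The upstream change stores json.dumps(response_json) here, so the value
    # is already a JSON string and safe to place in a header.
    original = getattr(response_obj._hidden_params, "original_response", None)
    if original is not None:
        # Hypothetical header name for exposing the original /v1/completions payload.
        headers["x-litellm-original-response"] = original
    return headers

A client debugging a /v1/completions call could then read that header (e.g. response.headers.get("x-litellm-original-response")) to see exactly what the upstream model returned before litellm normalized it.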