diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index 1fc43667b..451f4ceda 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -484,7 +484,7 @@ class OpenAITextCompletion(BaseLLM):
             elif optional_params.get("stream", False):
                 return self.streaming(logging_obj=logging_obj, api_base=api_base, data=data, headers=headers, model_response=model_response, model=model)
             else:
-                response = self._client_session.post(
+                response = httpx.post(
                     url=f"{api_base}",
                     json=data,
                     headers=headers,
@@ -546,7 +546,7 @@ class OpenAITextCompletion(BaseLLM):
         model_response: ModelResponse,
         model: str
     ):
-        with self._client_session.stream(
+        with httpx.stream(
             url=f"{api_base}",
             json=data,
             headers=headers,
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index b2a3ee9ba..7e60c43d3 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -432,6 +432,7 @@ def test_completion_text_openai():
 
 def test_completion_openai_with_optional_params():
     try:
+        litellm.set_verbose = True
         response = completion(
             model="gpt-3.5-turbo",
             messages=messages,
@@ -450,6 +451,7 @@ test_completion_openai_with_optional_params()
 
 def test_completion_openai_litellm_key():
     try:
+        litellm.set_verbose = False
         litellm.api_key = os.environ['OPENAI_API_KEY']
 
         # ensure key is set to None in .env and in openai.api_key