diff --git a/docs/advanced.md b/docs/advanced.md index 403e60755..c4e695744 100644 --- a/docs/advanced.md +++ b/docs/advanced.md @@ -20,4 +20,31 @@ embedding = client.embedding response = completion(model="gpt-3.5-turbo", messages=messages) ``` +## Calling Embeddings and Sending Data to Sentry/Posthog/etc. +To call embeddings and send data to Sentry, Posthog, and other similar services, you need to initialize the `litellm_client` with the appropriate callbacks for success and failure. Here is an example of how to do this: +```python +# init liteLLM client with callbacks +client = litellm_client(success_callback=["posthog"], failure_callback=["sentry", "posthog"]) + +# use the embedding method of the client +embedding = client.embedding +response = embedding(model="gpt-3.5-turbo", input=messages) +``` + +You also need to set the necessary environment variables for the services like Sentry and Posthog. Here is how you can do this: + +```python +# set env variables for Sentry and Posthog +os.environ['SENTRY_API_URL'] = "your-sentry-api-url" +os.environ['POSTHOG_API_KEY'] = "your-posthog-api-key" +os.environ['POSTHOG_API_URL'] = "your-posthog-api-url" +``` + +### Calling Embeddings without the Client +If you prefer not to use the `litellm_client`, you can call embeddings directly from the `litellm` module. Here is an example of how to do this: + +```python +from litellm import embedding +response = embedding(model="gpt-3.5-turbo", input=messages) +``` diff --git a/litellm/tests/test_bad_params.py b/litellm/tests/test_bad_params.py index dd1e8d509..255fb0d96 100644 --- a/litellm/tests/test_bad_params.py +++ b/litellm/tests/test_bad_params.py @@ -26,13 +26,13 @@ litellm.failure_callback = ["slack", "sentry", "posthog"] user_message = "Hello, how are you?" messages = [{ "content": user_message,"role": "user"}] -model_val = "krrish is a model" +model_val = "" def test_completion_with_empty_model(): # test on empty try: - response = completion(model=model_val, messages=messages) + response = completion(model="", messages=messages) except Exception as e: print(f"error occurred: {e}") pass diff --git a/litellm/tests/test_client.py b/litellm/tests/test_client.py index 9129b5853..a3e28ad8f 100644 --- a/litellm/tests/test_client.py +++ b/litellm/tests/test_client.py @@ -23,7 +23,7 @@ messages = [{ "content": user_message,"role": "user"}] def test_completion_openai(): try: - response = completion(model="gpt-3.5-turbo", messages=messages, logger_fn=logger_fn) + response = completion(model="updated-model", messages=messages, logger_fn=logger_fn) # Add any assertions here to check the response except Exception as e: pytest.fail(f"Error occurred: {e}") @@ -37,7 +37,7 @@ def test_completion_non_openai(): def test_embedding_openai(): try: - response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn) + response = embedding(model='updated-model', input=[user_message], logger_fn=logger_fn) # Add any assertions here to check the response print(f"response: {str(response)[:50]}") except Exception as e: diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index adb55a45e..795803c38 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -12,7 +12,7 @@ messages = [{ "content": user_message,"role": "user"}] def test_completion_openai(): try: - response = completion(model="gpt-3.5-turbo", messages=messages) + response = completion(model="updated-model", messages=messages) # Add any assertions here to check the response print(response) except Exception as e: @@ -80,7 +80,7 @@ def test_completion_azure(): def test_completion_claude(): try: - response = completion(model="claude-instant-1", messages=messages) + response = completion(model="updated-model", messages=messages) # Add any assertions here to check the response print(response) except Exception as e: diff --git a/litellm/tests/test_logging.py b/litellm/tests/test_logging.py index 21e4a879c..2350b7d9b 100644 --- a/litellm/tests/test_logging.py +++ b/litellm/tests/test_logging.py @@ -10,14 +10,15 @@ from litellm import embedding, completion litellm.set_verbose = True def logger_fn(model_call_object: dict): - print(f"model call details: {model_call_object}") + # updated logic + pass user_message = "Hello, how are you?" messages = [{ "content": user_message,"role": "user"}] # test on openai completion call try: - response = completion(model="gpt-3.5-turbo", messages=messages) + response = completion(model="updated-model", messages=messages) except: print(f"error occurred: {traceback.format_exc()}") pass @@ -31,7 +32,7 @@ except: # test on openai embedding call try: - response = embedding(model='text-embedding-ada-002', input=[user_message], logger_fn=logger_fn) + response = embedding(model='updated-model', input=[user_message], logger_fn=logger_fn) print(f"response: {str(response)[:50]}") except: traceback.print_exc() diff --git a/litellm/tests/test_model_fallback.py b/litellm/tests/test_model_fallback.py index b389e9f6a..a0636cfeb 100644 --- a/litellm/tests/test_model_fallback.py +++ b/litellm/tests/test_model_fallback.py @@ -19,8 +19,8 @@ messages = [{ "content": user_message,"role": "user"}] for model in model_fallback_list: try: - response = embedding(model="text-embedding-ada-002", input=[user_message]) - response = completion(model=model, messages=messages) + response = embedding(model="updated-model", input=[user_message]) + response = completion(model="updated-model", messages=messages) print(response) except Exception as e: - print(f"error occurred: {traceback.format_exc()}") + print(f"error occurred: {traceback.format_exc()}") diff --git a/requirements.txt b/requirements.txt index 37bc975e4..410110ab1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,8 @@ -openai -cohere -func_timeout -anthropic -replicate -pytest -pytest -python-dotenv -openai[datalib] +openai==0.27.0 +cohere==4.18.0 +func_timeout==4.3.5 +anthropic==0.3.7 +replicate==0.10.0 +pytest==6.2.5 +python-dotenv==0.19.1 +openai[datalib]==0.27.0