diff --git a/.github/workflows/interpret_load_test.py b/.github/workflows/interpret_load_test.py
index b52d4d2b3..9d95c768f 100644
--- a/.github/workflows/interpret_load_test.py
+++ b/.github/workflows/interpret_load_test.py
@@ -77,6 +77,9 @@ if __name__ == "__main__":
     new_release_body = (
         existing_release_body
         + "\n\n"
+        + "### Don't want to maintain your internal proxy? get in touch 🎉"
+        + "\nHosted Proxy Alpha: https://calendly.com/d/4mp-gd3-k5k/litellm-1-1-onboarding-chat"
+        + "\n\n"
         + "## Load Test LiteLLM Proxy Results"
         + "\n\n"
         + markdown_table
diff --git a/.gitignore b/.gitignore
index 309f726fe..abc4ecb0c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,3 +50,5 @@ kub.yaml
 loadtest_kub.yaml
 litellm/proxy/_new_secret_config.yaml
 litellm/proxy/_new_secret_config.yaml
+litellm/proxy/_super_secret_config.yaml
+litellm/proxy/_super_secret_config.yaml
diff --git a/README.md b/README.md
index 6c81181f3..38a166935 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
 Call all LLM APIs using the OpenAI format [Bedrock, Huggingface, VertexAI, TogetherAI, Azure, OpenAI, etc.]