From 5e2fd49dd326ff2267cd6e4055f7f6dbc99fd183 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 24 Apr 2025 05:59:25 +0200 Subject: [PATCH] Fix typos (#10232) --- docs/my-website/docs/completion/audio.md | 2 +- .../docs/completion/document_understanding.md | 2 +- docs/my-website/docs/completion/vision.md | 2 +- .../observability/greenscale_integration.md | 2 +- .../observability/langfuse_integration.md | 2 +- .../my-website/docs/pass_through/vertex_ai.md | 2 +- docs/my-website/docs/providers/anthropic.md | 2 +- docs/my-website/docs/providers/azure.md | 2 +- docs/my-website/docs/providers/vertex.md | 6 ++-- docs/my-website/docs/proxy/admin_ui_sso.md | 4 +-- docs/my-website/docs/proxy/alerting.md | 8 ++--- docs/my-website/docs/proxy/custom_pricing.md | 4 +-- docs/my-website/docs/proxy/db_deadlocks.md | 2 +- docs/my-website/docs/proxy/deploy.md | 6 ++-- docs/my-website/docs/proxy/enterprise.md | 4 +-- .../docs/proxy/guardrails/quick_start.md | 2 +- docs/my-website/docs/proxy/logging.md | 32 +++++++++---------- docs/my-website/docs/proxy/prod.md | 4 +-- .../docs/proxy/temporary_budget_increase.md | 2 +- docs/my-website/docs/proxy/ui_credentials.md | 2 +- docs/my-website/docs/proxy/virtual_keys.md | 4 +-- docs/my-website/docs/simple_proxy_old_doc.md | 14 ++++---- .../my-website/docs/tutorials/compare_llms.md | 2 +- .../docs/tutorials/gradio_integration.md | 2 +- 24 files changed, 57 insertions(+), 57 deletions(-) diff --git a/docs/my-website/docs/completion/audio.md b/docs/my-website/docs/completion/audio.md index 97153a5867..96b5e4f41c 100644 --- a/docs/my-website/docs/completion/audio.md +++ b/docs/my-website/docs/completion/audio.md @@ -3,7 +3,7 @@ import TabItem from '@theme/TabItem'; # Using Audio Models -How to send / receieve audio to a `/chat/completions` endpoint +How to send / receive audio to a `/chat/completions` endpoint ## Audio Output from a model diff --git 
a/docs/my-website/docs/completion/document_understanding.md b/docs/my-website/docs/completion/document_understanding.md index f58b836c63..acebb2e160 100644 --- a/docs/my-website/docs/completion/document_understanding.md +++ b/docs/my-website/docs/completion/document_understanding.md @@ -3,7 +3,7 @@ import TabItem from '@theme/TabItem'; # Using PDF Input -How to send / receieve pdf's (other document types) to a `/chat/completions` endpoint +How to send / receive pdf's (other document types) to a `/chat/completions` endpoint Works for: - Vertex AI models (Gemini + Anthropic) diff --git a/docs/my-website/docs/completion/vision.md b/docs/my-website/docs/completion/vision.md index 1e18109b3b..7670008486 100644 --- a/docs/my-website/docs/completion/vision.md +++ b/docs/my-website/docs/completion/vision.md @@ -194,7 +194,7 @@ Expected Response ## Explicitly specify image type -If you have images without a mime-type, or if litellm is incorrectly inferring the mime type of your image (e.g. calling `gs://` url's with vertex ai), you can set this explicity via the `format` param. +If you have images without a mime-type, or if litellm is incorrectly inferring the mime type of your image (e.g. calling `gs://` url's with vertex ai), you can set this explicitly via the `format` param. ```python "image_url": { diff --git a/docs/my-website/docs/observability/greenscale_integration.md b/docs/my-website/docs/observability/greenscale_integration.md index 49eadc6453..c9b00cd0e8 100644 --- a/docs/my-website/docs/observability/greenscale_integration.md +++ b/docs/my-website/docs/observability/greenscale_integration.md @@ -53,7 +53,7 @@ response = completion( ## Additional information in metadata -You can send any additional information to Greenscale by using the `metadata` field in completion and `greenscale_` prefix. 
This can be useful for sending metadata about the request, such as the project and application name, customer_id, enviornment, or any other information you want to track usage. `greenscale_project` and `greenscale_application` are required fields. +You can send any additional information to Greenscale by using the `metadata` field in completion and `greenscale_` prefix. This can be useful for sending metadata about the request, such as the project and application name, customer_id, environment, or any other information you want to track usage. `greenscale_project` and `greenscale_application` are required fields. ```python #openai call with additional metadata diff --git a/docs/my-website/docs/observability/langfuse_integration.md b/docs/my-website/docs/observability/langfuse_integration.md index 9727730363..576135ba67 100644 --- a/docs/my-website/docs/observability/langfuse_integration.md +++ b/docs/my-website/docs/observability/langfuse_integration.md @@ -185,7 +185,7 @@ curl --location --request POST 'http://0.0.0.0:4000/chat/completions' \ * `trace_release` - Release for the trace, defaults to `None` * `trace_metadata` - Metadata for the trace, defaults to `None` * `trace_user_id` - User identifier for the trace, defaults to completion argument `user` -* `tags` - Tags for the trace, defeaults to `None` +* `tags` - Tags for the trace, defaults to `None` ##### Updatable Parameters on Continuation diff --git a/docs/my-website/docs/pass_through/vertex_ai.md b/docs/my-website/docs/pass_through/vertex_ai.md index f40dfa70eb..b99f0fcf98 100644 --- a/docs/my-website/docs/pass_through/vertex_ai.md +++ b/docs/my-website/docs/pass_through/vertex_ai.md @@ -222,7 +222,7 @@ curl http://localhost:4000/vertex-ai/v1/projects/${PROJECT_ID}/locations/us-cent LiteLLM Proxy Server supports two methods of authentication to Vertex AI: -1. Pass Vertex Credetials client side to proxy server +1. Pass Vertex Credentials client side to proxy server 2. 
Set Vertex AI credentials on proxy server diff --git a/docs/my-website/docs/providers/anthropic.md b/docs/my-website/docs/providers/anthropic.md index 9e4f6908a4..95323719f0 100644 --- a/docs/my-website/docs/providers/anthropic.md +++ b/docs/my-website/docs/providers/anthropic.md @@ -1095,7 +1095,7 @@ response = completion( print(response.choices[0]) ``` - + 1. Add model to config diff --git a/docs/my-website/docs/providers/azure.md b/docs/my-website/docs/providers/azure.md index e58d8a7b5d..2ea444b029 100644 --- a/docs/my-website/docs/providers/azure.md +++ b/docs/my-website/docs/providers/azure.md @@ -483,7 +483,7 @@ response.stream_to_file(speech_file_path) This is a walkthrough on how to use Azure Active Directory Tokens - Microsoft Entra ID to make `litellm.completion()` calls Step 1 - Download Azure CLI -Installation instructons: https://learn.microsoft.com/en-us/cli/azure/install-azure-cli +Installation instructions: https://learn.microsoft.com/en-us/cli/azure/install-azure-cli ```shell brew update && brew install azure-cli ``` diff --git a/docs/my-website/docs/providers/vertex.md b/docs/my-website/docs/providers/vertex.md index 762bd5f332..7f60de27ee 100644 --- a/docs/my-website/docs/providers/vertex.md +++ b/docs/my-website/docs/providers/vertex.md @@ -692,7 +692,7 @@ curl http://0.0.0.0:4000/v1/chat/completions \ ### **Context Caching** -Use Vertex AI context caching is supported by calling provider api directly. (Unified Endpoint support comin soon.). +Use Vertex AI context caching is supported by calling provider api directly. (Unified Endpoint support coming soon.). [**Go straight to provider**](../pass_through/vertex_ai.md#context-caching) @@ -910,7 +910,7 @@ export VERTEXAI_PROJECT="my-test-project" # ONLY use if model project is differe ## Specifying Safety Settings -In certain use-cases you may need to make calls to the models and pass [safety settigns](https://ai.google.dev/docs/safety_setting_gemini) different from the defaults. 
To do so, simple pass the `safety_settings` argument to `completion` or `acompletion`. For example: +In certain use-cases you may need to make calls to the models and pass [safety settings](https://ai.google.dev/docs/safety_setting_gemini) different from the defaults. To do so, simply pass the `safety_settings` argument to `completion` or `acompletion`. For example: ### Set per model/request @@ -2050,7 +2050,7 @@ response = completion( print(response.choices[0]) ``` - + 1. Add model to config diff --git a/docs/my-website/docs/proxy/admin_ui_sso.md b/docs/my-website/docs/proxy/admin_ui_sso.md index 0bbba57fd9..a0dde80e9c 100644 --- a/docs/my-website/docs/proxy/admin_ui_sso.md +++ b/docs/my-website/docs/proxy/admin_ui_sso.md @@ -243,12 +243,12 @@ We allow you to pass a local image or a an http/https url of your image Set `UI_LOGO_PATH` on your env. We recommend using a hosted image, it's a lot easier to set up and configure / debug -Exaple setting Hosted image +Example setting Hosted image ```shell UI_LOGO_PATH="https://litellm-logo-aws-marketplace.s3.us-west-2.amazonaws.com/berriai-logo-github.png" ``` -Exaple setting a local image (on your container) +Example setting a local image (on your container) ```shell UI_LOGO_PATH="ui_images/logo.jpg" ``` diff --git a/docs/my-website/docs/proxy/alerting.md b/docs/my-website/docs/proxy/alerting.md index c2fc510d96..e2f6223c8f 100644 --- a/docs/my-website/docs/proxy/alerting.md +++ b/docs/my-website/docs/proxy/alerting.md @@ -213,7 +213,7 @@ model_list: general_settings: master_key: sk-1234 alerting: ["slack"] - alerting_threshold: 0.0001 # (Seconds) set an artifically low threshold for testing alerting + alerting_threshold: 0.0001 # (Seconds) set an artificially low threshold for testing alerting alert_to_webhook_url: { "llm_exceptions": "https://hooks.slack.com/services/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", "llm_too_slow": "https://hooks.slack.com/services/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", @@
-247,7 +247,7 @@ model_list: general_settings: master_key: sk-1234 alerting: ["slack"] - alerting_threshold: 0.0001 # (Seconds) set an artifically low threshold for testing alerting + alerting_threshold: 0.0001 # (Seconds) set an artificially low threshold for testing alerting alert_to_webhook_url: { "llm_exceptions": ["os.environ/SLACK_WEBHOOK_URL", "os.environ/SLACK_WEBHOOK_URL_2"], "llm_too_slow": ["https://webhook.site/7843a980-a494-4967-80fb-d502dbc16886", "https://webhook.site/28cfb179-f4fb-4408-8129-729ff55cf213"], @@ -425,7 +425,7 @@ curl -X GET --location 'http://0.0.0.0:4000/health/services?service=webhook' \ - `projected_exceeded_date` *str or null*: The date when the budget is projected to be exceeded, returned when 'soft_budget' is set for key (optional). - `projected_spend` *float or null*: The projected spend amount, returned when 'soft_budget' is set for key (optional). - `event` *Literal["budget_crossed", "threshold_crossed", "projected_limit_exceeded"]*: The type of event that triggered the webhook. Possible values are: - * "spend_tracked": Emitted whenver spend is tracked for a customer id. + * "spend_tracked": Emitted whenever spend is tracked for a customer id. * "budget_crossed": Indicates that the spend has exceeded the max budget. * "threshold_crossed": Indicates that spend has crossed a threshold (currently sent when 85% and 95% of budget is reached). * "projected_limit_exceeded": For "key" only - Indicates that the projected spend is expected to exceed the soft budget threshold. @@ -480,7 +480,7 @@ LLM-related Alerts | `cooldown_deployment` | Alerts when a deployment is put into cooldown | ✅ | | `new_model_added` | Notifications when a new model is added to litellm proxy through /model/new| ✅ | | `outage_alerts` | Alerts when a specific LLM deployment is facing an outage | ✅ | -| `region_outage_alerts` | Alerts when a specfic LLM region is facing an outage. 
Example us-east-1 | ✅ | +| `region_outage_alerts` | Alerts when a specific LLM region is facing an outage. Example us-east-1 | ✅ | Budget and Spend Alerts diff --git a/docs/my-website/docs/proxy/custom_pricing.md b/docs/my-website/docs/proxy/custom_pricing.md index 792d5c26dd..e2df7721bf 100644 --- a/docs/my-website/docs/proxy/custom_pricing.md +++ b/docs/my-website/docs/proxy/custom_pricing.md @@ -56,7 +56,7 @@ model_list: model: azure/ api_key: os.environ/AZURE_API_KEY api_base: os.environ/AZURE_API_BASE - api_version: os.envrion/AZURE_API_VERSION + api_version: os.environ/AZURE_API_VERSION model_info: input_cost_per_token: 0.000421 # 👈 ONLY to track cost per token output_cost_per_token: 0.000520 # 👈 ONLY to track cost per token @@ -133,4 +133,4 @@ acompletion( If these keys are not present, LiteLLM will not use your custom pricing. -If the problem persists, please file an issue on [GitHub](https://github.com/BerriAI/litellm/issues). \ No newline at end of file +If the problem persists, please file an issue on [GitHub](https://github.com/BerriAI/litellm/issues). diff --git a/docs/my-website/docs/proxy/db_deadlocks.md b/docs/my-website/docs/proxy/db_deadlocks.md index 332374995d..0eee928fa6 100644 --- a/docs/my-website/docs/proxy/db_deadlocks.md +++ b/docs/my-website/docs/proxy/db_deadlocks.md @@ -19,7 +19,7 @@ LiteLLM writes `UPDATE` and `UPSERT` queries to the DB. When using 10+ instances ### Stage 1. Each instance writes updates to redis -Each instance will accumlate the spend updates for a key, user, team, etc and write the updates to a redis queue. +Each instance will accumulate the spend updates for a key, user, team, etc and write the updates to a redis queue.

diff --git a/docs/my-website/docs/proxy/deploy.md b/docs/my-website/docs/proxy/deploy.md index 011778f584..d57686dc78 100644 --- a/docs/my-website/docs/proxy/deploy.md +++ b/docs/my-website/docs/proxy/deploy.md @@ -22,7 +22,7 @@ echo 'LITELLM_MASTER_KEY="sk-1234"' > .env # Add the litellm salt key - you cannot change this after adding a model # It is used to encrypt / decrypt your LLM API Key credentials -# We recommned - https://1password.com/password-generator/ +# We recommend - https://1password.com/password-generator/ # password generator to get a random hash for litellm salt key echo 'LITELLM_SALT_KEY="sk-1234"' >> .env @@ -125,7 +125,7 @@ CMD ["--port", "4000", "--config", "config.yaml", "--detailed_debug"] ### Build from litellm `pip` package -Follow these instructons to build a docker container from the litellm pip package. If your company has a strict requirement around security / building images you can follow these steps. +Follow these instructions to build a docker container from the litellm pip package. If your company has a strict requirement around security / building images you can follow these steps. Dockerfile @@ -999,7 +999,7 @@ services: - "4000:4000" # Map the container port to the host, change the host port if necessary volumes: - ./litellm-config.yaml:/app/config.yaml # Mount the local configuration file - # You can change the port or number of workers as per your requirements or pass any new supported CLI augument. Make sure the port passed here matches with the container port defined above in `ports` value + # You can change the port or number of workers as per your requirements or pass any new supported CLI argument. 
Make sure the port passed here matches with the container port defined above in `ports` value command: [ "--config", "/app/config.yaml", "--port", "4000", "--num_workers", "8" ] # ...rest of your docker-compose config if any diff --git a/docs/my-website/docs/proxy/enterprise.md b/docs/my-website/docs/proxy/enterprise.md index fb0945d488..26dc9d4950 100644 --- a/docs/my-website/docs/proxy/enterprise.md +++ b/docs/my-website/docs/proxy/enterprise.md @@ -691,7 +691,7 @@ curl --request POST \ -**Successfull Request** +**Successful Request** ```shell curl --location 'http://0.0.0.0:4000/key/generate' \ @@ -729,7 +729,7 @@ curl --location 'http://0.0.0.0:4000/key/generate' \ -**Successfull Request** +**Successful Request** ```shell curl http://localhost:4000/chat/completions \ diff --git a/docs/my-website/docs/proxy/guardrails/quick_start.md b/docs/my-website/docs/proxy/guardrails/quick_start.md index aeac507e0a..55cfa98d48 100644 --- a/docs/my-website/docs/proxy/guardrails/quick_start.md +++ b/docs/my-website/docs/proxy/guardrails/quick_start.md @@ -164,7 +164,7 @@ curl -i http://localhost:4000/v1/chat/completions \ **Expected response** -Your response headers will incude `x-litellm-applied-guardrails` with the guardrail applied +Your response headers will include `x-litellm-applied-guardrails` with the guardrail applied ``` x-litellm-applied-guardrails: aporia-pre-guard diff --git a/docs/my-website/docs/proxy/logging.md b/docs/my-website/docs/proxy/logging.md index c8731dd270..ad4cababc0 100644 --- a/docs/my-website/docs/proxy/logging.md +++ b/docs/my-website/docs/proxy/logging.md @@ -277,7 +277,7 @@ Found under `kwargs["standard_logging_object"]`. This is a standard payload, log ## Langfuse -We will use the `--config` to set `litellm.success_callback = ["langfuse"]` this will log all successfull LLM calls to langfuse. 
Make sure to set `LANGFUSE_PUBLIC_KEY` and `LANGFUSE_SECRET_KEY` in your environment +We will use the `--config` to set `litellm.success_callback = ["langfuse"]` this will log all successful LLM calls to langfuse. Make sure to set `LANGFUSE_PUBLIC_KEY` and `LANGFUSE_SECRET_KEY` in your environment **Step 1** Install langfuse @@ -534,15 +534,15 @@ print(response) Use this if you want to control which LiteLLM-specific fields are logged as tags by the LiteLLM proxy. By default LiteLLM Proxy logs no LiteLLM-specific fields -| LiteLLM specific field | Description | Example Value | -|------------------------|-------------------------------------------------------|------------------------------------------------| -| `cache_hit` | Indicates whether a cache hit occured (True) or not (False) | `true`, `false` | -| `cache_key` | The Cache key used for this request | `d2b758c****`| -| `proxy_base_url` | The base URL for the proxy server, the value of env var `PROXY_BASE_URL` on your server | `https://proxy.example.com`| -| `user_api_key_alias` | An alias for the LiteLLM Virtual Key.| `prod-app1` | -| `user_api_key_user_id` | The unique ID associated with a user's API key. | `user_123`, `user_456` | -| `user_api_key_user_email` | The email associated with a user's API key. | `user@example.com`, `admin@example.com` | -| `user_api_key_team_alias` | An alias for a team associated with an API key. 
| `team_alpha`, `dev_team` | +| LiteLLM specific field | Description | Example Value | +|---------------------------|-----------------------------------------------------------------------------------------|------------------------------------------------| +| `cache_hit` | Indicates whether a cache hit occurred (True) or not (False) | `true`, `false` | +| `cache_key` | The Cache key used for this request | `d2b758c****` | +| `proxy_base_url` | The base URL for the proxy server, the value of env var `PROXY_BASE_URL` on your server | `https://proxy.example.com` | +| `user_api_key_alias` | An alias for the LiteLLM Virtual Key. | `prod-app1` | +| `user_api_key_user_id` | The unique ID associated with a user's API key. | `user_123`, `user_456` | +| `user_api_key_user_email` | The email associated with a user's API key. | `user@example.com`, `admin@example.com` | +| `user_api_key_team_alias` | An alias for a team associated with an API key. | `team_alpha`, `dev_team` | **Usage** @@ -1190,7 +1190,7 @@ We will use the `--config` to set - `litellm.success_callback = ["s3"]` -This will log all successfull LLM calls to s3 Bucket +This will log all successful LLM calls to s3 Bucket **Step 1** Set AWS Credentials in .env @@ -1279,7 +1279,7 @@ Log LLM Logs to [Azure Data Lake Storage](https://learn.microsoft.com/en-us/azur | Property | Details | |----------|---------| -| Description | Log LLM Input/Output to Azure Blob Storag (Bucket) | +| Description | Log LLM Input/Output to Azure Blob Storage (Bucket) | | Azure Docs on Data Lake Storage | [Azure Data Lake Storage](https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-introduction) | @@ -1360,7 +1360,7 @@ LiteLLM Supports logging to the following Datdog Integrations: -We will use the `--config` to set `litellm.callbacks = ["datadog"]` this will log all successfull LLM calls to DataDog +We will use the `--config` to set `litellm.callbacks = ["datadog"]` this will log all successful LLM calls to DataDog **Step 
1**: Create a `config.yaml` file and set `litellm_settings`: `success_callback` @@ -1636,7 +1636,7 @@ class MyCustomHandler(CustomLogger): litellm_params = kwargs.get("litellm_params", {}) metadata = litellm_params.get("metadata", {}) # headers passed to LiteLLM proxy, can be found here - # Acess Exceptions & Traceback + # Access Exceptions & Traceback exception_event = kwargs.get("exception", None) traceback_event = kwargs.get("traceback_exception", None) @@ -2205,7 +2205,7 @@ We will use the `--config` to set - `litellm.success_callback = ["dynamodb"]` - `litellm.dynamodb_table_name = "your-table-name"` -This will log all successfull LLM calls to DynamoDB +This will log all successful LLM calls to DynamoDB **Step 1** Set AWS Credentials in .env @@ -2370,7 +2370,7 @@ litellm --test [Athina](https://athina.ai/) allows you to log LLM Input/Output for monitoring, analytics, and observability. -We will use the `--config` to set `litellm.success_callback = ["athina"]` this will log all successfull LLM calls to athina +We will use the `--config` to set `litellm.success_callback = ["athina"]` this will log all successful LLM calls to athina **Step 1** Set Athina API key diff --git a/docs/my-website/docs/proxy/prod.md b/docs/my-website/docs/proxy/prod.md index 2d09502d52..e1b8336401 100644 --- a/docs/my-website/docs/proxy/prod.md +++ b/docs/my-website/docs/proxy/prod.md @@ -61,7 +61,7 @@ CMD ["--port", "4000", "--config", "./proxy_server_config.yaml"] ## 3. Use Redis 'port','host', 'password'. NOT 'redis_url' -If you decide to use Redis, DO NOT use 'redis_url'. We recommend usig redis port, host, and password params. +If you decide to use Redis, DO NOT use 'redis_url'. We recommend using redis port, host, and password params. `redis_url`is 80 RPS slower @@ -169,7 +169,7 @@ If you plan on using the DB, set a salt key for encrypting/decrypting variables Do not change this after adding a model. 
It is used to encrypt / decrypt your LLM API Key credentials -We recommned - https://1password.com/password-generator/ password generator to get a random hash for litellm salt key. +We recommend - https://1password.com/password-generator/ password generator to get a random hash for litellm salt key. ```bash export LITELLM_SALT_KEY="sk-1234" diff --git a/docs/my-website/docs/proxy/temporary_budget_increase.md b/docs/my-website/docs/proxy/temporary_budget_increase.md index 917ff0d6b5..de985eb9bd 100644 --- a/docs/my-website/docs/proxy/temporary_budget_increase.md +++ b/docs/my-website/docs/proxy/temporary_budget_increase.md @@ -3,7 +3,7 @@ Set temporary budget increase for a LiteLLM Virtual Key. Use this if you get asked to increase the budget for a key temporarily. -| Heirarchy | Supported | +| Hierarchy | Supported | |-----------|-----------| | LiteLLM Virtual Key | ✅ | | User | ❌ | diff --git a/docs/my-website/docs/proxy/ui_credentials.md b/docs/my-website/docs/proxy/ui_credentials.md index ba9d1c4c66..40db536859 100644 --- a/docs/my-website/docs/proxy/ui_credentials.md +++ b/docs/my-website/docs/proxy/ui_credentials.md @@ -4,7 +4,7 @@ import TabItem from '@theme/TabItem'; # Adding LLM Credentials -You can add LLM provider credentials on the UI. Once you add credentials you can re-use them when adding new models +You can add LLM provider credentials on the UI. 
Once you add credentials you can reuse them when adding new models ## Add a credential + model diff --git a/docs/my-website/docs/proxy/virtual_keys.md b/docs/my-website/docs/proxy/virtual_keys.md index 04be4ade48..26ec69b30d 100644 --- a/docs/my-website/docs/proxy/virtual_keys.md +++ b/docs/my-website/docs/proxy/virtual_keys.md @@ -23,7 +23,7 @@ Requirements: - ** Set on config.yaml** set your master key under `general_settings:master_key`, example below - ** Set env variable** set `LITELLM_MASTER_KEY` -(the proxy Dockerfile checks if the `DATABASE_URL` is set and then intializes the DB connection) +(the proxy Dockerfile checks if the `DATABASE_URL` is set and then initializes the DB connection) ```shell export DATABASE_URL=postgresql://:@:/ @@ -333,7 +333,7 @@ curl http://localhost:4000/v1/chat/completions \ **Expected Response** -Expect to see a successfull response from the litellm proxy since the key passed in `X-Litellm-Key` is valid +Expect to see a successful response from the litellm proxy since the key passed in `X-Litellm-Key` is valid ```shell {"id":"chatcmpl-f9b2b79a7c30477ab93cd0e717d1773e","choices":[{"finish_reason":"stop","index":0,"message":{"content":"\n\nHello there, how may I assist you today?","role":"assistant","tool_calls":null,"function_call":null}}],"created":1677652288,"model":"gpt-3.5-turbo-0125","object":"chat.completion","system_fingerprint":"fp_44709d6fcb","usage":{"completion_tokens":12,"prompt_tokens":9,"total_tokens":21} ``` diff --git a/docs/my-website/docs/simple_proxy_old_doc.md b/docs/my-website/docs/simple_proxy_old_doc.md index 64491b1ea8..730fd0aab4 100644 --- a/docs/my-website/docs/simple_proxy_old_doc.md +++ b/docs/my-website/docs/simple_proxy_old_doc.md @@ -994,16 +994,16 @@ litellm --health ## Logging Proxy Input/Output - OpenTelemetry -### Step 1 Start OpenTelemetry Collecter Docker Container +### Step 1 Start OpenTelemetry Collector Docker Container This container sends logs to your selected destination -#### Install 
OpenTelemetry Collecter Docker Image +#### Install OpenTelemetry Collector Docker Image ```shell docker pull otel/opentelemetry-collector:0.90.0 docker run -p 127.0.0.1:4317:4317 -p 127.0.0.1:55679:55679 otel/opentelemetry-collector:0.90.0 ``` -#### Set Destination paths on OpenTelemetry Collecter +#### Set Destination paths on OpenTelemetry Collector Here's the OpenTelemetry yaml config to use with Elastic Search ```yaml @@ -1077,7 +1077,7 @@ general_settings: LiteLLM will read the `OTEL_ENDPOINT` environment variable to send data to your OTEL collector ```python -os.environ['OTEL_ENDPOINT'] # defauls to 127.0.0.1:4317 if not provided +os.environ['OTEL_ENDPOINT'] # defaults to 127.0.0.1:4317 if not provided ``` #### Start LiteLLM Proxy @@ -1101,8 +1101,8 @@ curl --location 'http://0.0.0.0:4000/chat/completions' \ ``` -#### Test & View Logs on OpenTelemetry Collecter -On successfull logging you should be able to see this log on your `OpenTelemetry Collecter` Docker Container +#### Test & View Logs on OpenTelemetry Collector +On successful logging you should be able to see this log on your `OpenTelemetry Collector` Docker Container ```shell Events: SpanEvent #0 @@ -1149,7 +1149,7 @@ Here's the log view on Elastic Search. 
You can see the request `input`, `output` ## Logging Proxy Input/Output - Langfuse -We will use the `--config` to set `litellm.success_callback = ["langfuse"]` this will log all successfull LLM calls to langfuse +We will use the `--config` to set `litellm.success_callback = ["langfuse"]` this will log all successful LLM calls to langfuse **Step 1** Install langfuse diff --git a/docs/my-website/docs/tutorials/compare_llms.md b/docs/my-website/docs/tutorials/compare_llms.md index a7eda2c85f..d7fdf8d7d9 100644 --- a/docs/my-website/docs/tutorials/compare_llms.md +++ b/docs/my-website/docs/tutorials/compare_llms.md @@ -117,7 +117,7 @@ response = completion("command-nightly", messages) """ -# qustions/logs you want to run the LLM on +# questions/logs you want to run the LLM on questions = [ "what is litellm?", "why should I use LiteLLM", diff --git a/docs/my-website/docs/tutorials/gradio_integration.md b/docs/my-website/docs/tutorials/gradio_integration.md index 4854fc2ac9..021815d937 100644 --- a/docs/my-website/docs/tutorials/gradio_integration.md +++ b/docs/my-website/docs/tutorials/gradio_integration.md @@ -30,7 +30,7 @@ def inference(message, history): yield partial_message except Exception as e: print("Exception encountered:", str(e)) - yield f"An Error occured please 'Clear' the error and try your question again" + yield f"An Error occurred please 'Clear' the error and try your question again" ``` ### Define Chat Interface