diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 9830074142..4519228a34 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -10,9 +10,9 @@ **Please complete all items before asking a LiteLLM maintainer to review your PR** -- [ ] I have Added testing in the `tests/litellm/` directory, **Adding at least 1 test is a hard requirement** - [see details](https://docs.litellm.ai/docs/contributing#2-adding-testing-to-your-pr) +- [ ] I have Added testing in the `tests/litellm/` directory, **Adding at least 1 test is a hard requirement** - [see details](https://docs.litellm.ai/docs/extras/contributing_code) - [ ] I have added a screenshot of my new test passing locally -- [ ] My PR passes all unit tests on `make unit-test` [https://docs.litellm.ai/docs/contributing] +- [ ] My PR passes all unit tests on [`make unit-test`](https://docs.litellm.ai/docs/extras/contributing_code) - [ ] My PR's scope is as isolated as possible, it only solves 1 specific problem diff --git a/README.md b/README.md index 97ccc423da..2d2f71e4d1 100644 --- a/README.md +++ b/README.md @@ -340,7 +340,7 @@ curl 'http://0.0.0.0:4000/key/generate' \ ## Contributing -Interested in contributing? Contributions to LiteLLM Python SDK, Proxy Server, and contributing LLM integrations are both accepted and highly encouraged! [See our Contribution Guide for more details](https://docs.litellm.ai/docs/contributing) +Interested in contributing? Contributions to LiteLLM Python SDK, Proxy Server, and contributing LLM integrations are both accepted and highly encouraged! 
[See our Contribution Guide for more details](https://docs.litellm.ai/docs/extras/contributing_code) # Enterprise For companies that need better security, user management and professional support diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index b2a08544f9..2e740a3ca3 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1994,8 +1994,8 @@ "max_tokens": 8191, "max_input_tokens": 32000, "max_output_tokens": 8191, - "input_cost_per_token": 0.000001, - "output_cost_per_token": 0.000003, + "input_cost_per_token": 0.0000001, + "output_cost_per_token": 0.0000003, "litellm_provider": "mistral", "supports_function_calling": true, "mode": "chat", @@ -2006,8 +2006,8 @@ "max_tokens": 8191, "max_input_tokens": 32000, "max_output_tokens": 8191, - "input_cost_per_token": 0.000001, - "output_cost_per_token": 0.000003, + "input_cost_per_token": 0.0000001, + "output_cost_per_token": 0.0000003, "litellm_provider": "mistral", "supports_function_calling": true, "mode": "chat",