mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-24 18:24:20 +00:00
* fix(utils.py): return citations for perplexity streaming Fixes https://github.com/BerriAI/litellm/issues/5535 * fix(anthropic/chat.py): support fallbacks for anthropic streaming (#5542) * fix(anthropic/chat.py): support fallbacks for anthropic streaming Fixes https://github.com/BerriAI/litellm/issues/5512 * fix(anthropic/chat.py): use module level http client if none given (prevents early client closure) * fix: fix linting errors * fix(http_handler.py): fix raise_for_status error handling * test: retry flaky test * fix otel type * fix(bedrock/embed): fix error raising * test(test_openai_batches_and_files.py): skip azure batches test (for now) quota exceeded * fix(test_router.py): skip azure batch route test (for now) - hit batch quota limits --------- Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com> * All `model_group_alias` should show up in `/models`, `/model/info` , `/model_group/info` (#5539) * fix(router.py): support returning model_alias model names in `/v1/models` * fix(proxy_server.py): support returning model alias'es on `/model/info` * feat(router.py): support returning model group alias for `/model_group/info` * fix(proxy_server.py): fix linting errors * fix(proxy_server.py): fix linting errors * build(model_prices_and_context_window.json): add amazon titan text premier pricing information Closes https://github.com/BerriAI/litellm/issues/5560 * feat(litellm_logging.py): log standard logging response object for pass through endpoints. Allows bedrock /invoke agent calls to be correctly logged to langfuse + s3 * fix(success_handler.py): fix linting error * fix(success_handler.py): fix linting errors * fix(team_endpoints.py): Allows admin to update team member budgets --------- Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
52 lines
No EOL
1.5 KiB
JSON
{
    "name": "Python 3.11",

    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
    "image": "mcr.microsoft.com/devcontainers/python:3.11-bookworm",
    // https://github.com/devcontainers/images/tree/main/src/python
    // https://mcr.microsoft.com/en-us/product/devcontainers/python/tags

    // "build": {
    //     "dockerfile": "Dockerfile",
    //     "context": ".."
    // },

    // Features to add to the dev container. More info: https://containers.dev/features.
    // "features": {},

    // Configure tool-specific properties.
    "customizations": {
        // Configure properties specific to VS Code.
        "vscode": {
            "settings": {},
            "extensions": [
                "ms-python.python",
                "ms-python.vscode-pylance",
                "GitHub.copilot",
                "GitHub.copilot-chat",
                "ms-python.autopep8"
            ]
        }
    },

    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    "forwardPorts": [4000],

    "containerEnv": {
        "LITELLM_LOG": "DEBUG"
    },

    // Use 'portsAttributes' to set default properties for specific forwarded ports.
    // More info: https://containers.dev/implementors/json_reference/#port-attributes
    "portsAttributes": {
        "4000": {
            "label": "LiteLLM Server",
            "onAutoForward": "notify"
        }
    },

    // More info: https://aka.ms/dev-containers-non-root.
    // "remoteUser": "litellm",

    // Use 'postCreateCommand' to run commands after the container is created.
    "postCreateCommand": "pipx install poetry && poetry install -E extra_proxy -E proxy"
}