litellm/docs/my-website/sidebars.js

/**
 * Creating a sidebar enables you to:
 * - create an ordered group of docs
 * - render a sidebar for each doc of that group
 * - provide next/previous navigation
 *
 * The sidebars can be generated from the filesystem, or explicitly defined here.
 * Create as many sidebars as you want.
 */
// @ts-check
/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
  // By default, Docusaurus generates a sidebar from the docs folder structure,
  // but you can create a sidebar manually.
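  //
  // As a minimal sketch of that autogenerated form (assuming the standard
  // @docusaurus/plugin-content-docs API; not what this config uses):
  // tutorialSidebar: [{ type: "autogenerated", dirName: "." }],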
  tutorialSidebar: [
    { type: "doc", id: "index" }, // NEW
    {
      type: "category",
      label: "💥 LiteLLM Proxy Server",
      link: {
        type: "generated-index",
        title: "💥 LiteLLM Proxy Server (LLM Gateway)",
        description: `OpenAI Proxy Server (LLM Gateway) to call 100+ LLMs in a unified interface & track spend, set budgets per virtual key/user`,
        slug: "/simple_proxy",
      },
      items: [
        "proxy/quick_start",
        "proxy/docker_quick_start",
        "proxy/deploy",
        "proxy/prod",
        {
          type: "category",
          label: "Architecture",
          items: ["proxy/architecture"],
        },
        {
          type: "link",
          label: "📖 All Endpoints (Swagger)",
          href: "https://litellm-api.up.railway.app/",
        },
        "proxy/enterprise",
        "proxy/user_keys",
        "proxy/demo",
        "proxy/configs",
        "proxy/reliability",
        {
          type: "category",
          label: "Use with Provider SDKs",
          items: [
            "pass_through/vertex_ai",
            "pass_through/google_ai_studio",
            "pass_through/cohere",
            "anthropic_completion",
            "pass_through/bedrock",
            "pass_through/langfuse",
          ],
        },
        "proxy/cost_tracking",
        "proxy/custom_pricing",
        "proxy/virtual_keys",
        {
          type: "category",
          label: "Admin UI",
          items: ["proxy/ui", "proxy/self_serve"],
        },
        {
          type: "category",
          label: "🪢 Logging, Alerting, Metrics",
          items: [
            "proxy/logging",
            "proxy/bucket",
            "proxy/team_logging",
            "proxy/streaming_logging",
            "proxy/alerting",
            "proxy/prometheus",
          ],
        },
        {
          type: "category",
          label: "🛡️ [Beta] Guardrails",
          items: [
            "proxy/guardrails/quick_start",
            "proxy/guardrails/aporia_api",
            "proxy/guardrails/lakera_ai",
            "proxy/guardrails/bedrock",
            "proxy/guardrails/pii_masking_v2",
            "proxy/guardrails/secret_detection",
            "proxy/guardrails/custom_guardrail",
            "prompt_injection",
          ],
        },
        {
          type: "category",
          label: "Secret Manager - storing LLM API Keys",
          items: [
            "secret",
            "oidc",
          ],
        },
        "proxy/tag_routing",
        "proxy/users",
        "proxy/team_budgets",
        "proxy/customers",
        "proxy/billing",
        "proxy/token_auth",
        "proxy/oauth2",
        "proxy/caching",
        "proxy/pass_through",
        "proxy/email",
        "proxy/multiple_admins",
        "proxy/team_based_routing",
        "proxy/customer_routing",
        {
          type: "category",
          label: "Extra Load Balancing",
          items: ["proxy/load_balancing"],
        },
        "proxy/model_management",
        "proxy/health",
        "proxy/debugging",
        "proxy/call_hooks",
        "proxy/rules",
        "proxy/cli",
      ],
    },
    {
      type: "category",
      label: "💯 Supported Models & Providers",
      link: {
        type: "generated-index",
        title: "Providers",
        description:
          "Learn how to deploy + call models from different providers on LiteLLM",
        slug: "/providers",
      },
      items: [
        "providers/openai",
        "providers/text_completion_openai",
        "providers/openai_compatible",
        "providers/azure",
        "providers/azure_ai",
        "providers/vertex",
        "providers/gemini",
        "providers/anthropic",
        "providers/aws_sagemaker",
        "providers/bedrock",
        "providers/litellm_proxy",
        "providers/mistral",
        "providers/codestral",
        "providers/cohere",
        "providers/anyscale",
        "providers/huggingface",
        "providers/databricks",
        "providers/watsonx",
        "providers/predibase",
        "providers/nvidia_nim",
        "providers/cerebras",
        "providers/volcano",
        "providers/triton-inference-server",
        "providers/ollama",
        "providers/perplexity",
        "providers/friendliai",
        "providers/groq",
        "providers/github",
        "providers/deepseek",
        "providers/fireworks_ai",
        "providers/clarifai",
        "providers/vllm",
        "providers/xinference",
        "providers/cloudflare_workers",
        "providers/deepinfra",
        "providers/ai21",
        "providers/nlp_cloud",
        "providers/replicate",
        "providers/togetherai",
        "providers/voyage",
        "providers/aleph_alpha",
        "providers/baseten",
        "providers/openrouter",
        "providers/palm",
        "providers/sambanova",
        // "providers/custom_openai_proxy",
        "providers/custom_llm_server",
        "providers/petals",
      ],
    },
    {
      type: "category",
      label: "Chat Completions (litellm.completion + PROXY)",
      link: {
        type: "generated-index",
        title: "Chat Completions",
        description: "Details on the completion() function",
        slug: "/completion",
      },
      items: [
        "completion/input",
        "completion/provider_specific_params",
        "completion/json_mode",
        "completion/prefix",
        "completion/drop_params",
        "completion/prompt_formatting",
        "completion/output",
        "completion/usage",
        "exception_mapping",
        "completion/stream",
        "completion/message_trimming",
        "completion/function_call",
        "completion/vision",
        "completion/model_alias",
        "completion/batching",
        "completion/mock_requests",
        "completion/reliable_completions",
      ],
    },
    {
      type: "category",
      label: "Supported Endpoints - /images, /audio/speech, /assistants etc",
      items: [
        "embedding/supported_embedding",
        "image_generation",
        "audio_transcription",
        "text_to_speech",
        "rerank",
        "assistants",
        "batches",
        "fine_tuning",
        {
          type: "link",
          label: "Use LiteLLM Proxy with Vertex, Bedrock SDK",
          href: "/docs/pass_through/vertex_ai",
        },
      ],
    },
"scheduler",
{
type: "category",
label: "🚅 LiteLLM Python SDK",
items: [
"routing",
"set_keys",
"completion/token_usage",
"sdk_custom_pricing",
"embedding/async_embedding",
"embedding/moderation",
"budget_manager",
"caching/all_caches",
"migration",
{
type: "category",
label: "LangChain, LlamaIndex, Instructor Integration",
items: ["langchain/langchain", "tutorials/instructor"],
},
],
},
"load_test",
{
type: "category",
label: "Logging & Observability",
items: [
"observability/opentelemetry_integration",
"observability/langfuse_integration",
"observability/logfire_integration",
"observability/gcs_bucket_integration",
"observability/langsmith_integration",
"observability/arize_integration",
"debugging/local_debugging",
"observability/raw_request_response",
"observability/custom_callback",
"observability/scrub_data",
"observability/braintrust",
"observability/sentry",
"observability/lago",
"observability/helicone_integration",
"observability/openmeter",
"observability/promptlayer_integration",
"observability/wandb_integration",
"observability/slack_integration",
"observability/athina_integration",
"observability/lunary_integration",
"observability/greenscale_integration",
"observability/supabase_integration",
`observability/telemetry`,
],
},
    {
      type: "category",
      label: "Tutorials",
      items: [
        "tutorials/litellm_proxy_aporia",
        "tutorials/azure_openai",
        "tutorials/instructor",
        "tutorials/gradio_integration",
        "tutorials/huggingface_codellama",
        "tutorials/huggingface_tutorial",
        "tutorials/TogetherAI_liteLLM",
        "tutorials/finetuned_chat_gpt",
        "tutorials/text_completion",
        "tutorials/first_playground",
        "tutorials/model_fallbacks",
      ],
    },
    {
      type: "category",
      label: "Extras",
      items: [
        "extras/contributing",
        "data_security",
        "migration_policy",
        "contributing",
        "proxy/pii_masking",
        "rules",
        "proxy_server",
        {
          type: "category",
          label: "❤️ 🚅 Projects built on LiteLLM",
          link: {
            type: "generated-index",
            title: "Projects built on LiteLLM",
            description:
              "Learn how to deploy + call models from different providers on LiteLLM",
            slug: "/project",
          },
          items: [
            "projects/Docq.AI",
            "projects/OpenInterpreter",
            "projects/dbally",
            "projects/FastREPL",
            "projects/PROMPTMETHEUS",
            "projects/Codium PR Agent",
            "projects/Prompt2Model",
            "projects/SalesGPT",
            "projects/Quivr",
            "projects/Langstream",
            "projects/Otter",
            "projects/GPT Migrate",
            "projects/YiVal",
            "projects/LiteLLM Proxy",
            "projects/llm_cord",
          ],
        },
      ],
    },
    "troubleshoot",
  ],
};

module.exports = sidebars;
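
// Note: Docusaurus typically picks this file up through the `sidebarPath` option
// of the docs plugin in docusaurus.config.js, e.g.
// `sidebarPath: require.resolve("./sidebars.js")`; that wiring lives in this
// site's docusaurus.config.js rather than in this file.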