diff --git a/README.md b/README.md index 8868dc8ccd..fe7d56b6c4 100644 --- a/README.md +++ b/README.md @@ -225,37 +225,37 @@ curl 'http://0.0.0.0:4000/key/generate' \ ## Supported Providers ([Docs](https://docs.litellm.ai/docs/providers)) | Provider | [Completion](https://docs.litellm.ai/docs/#basic-usage) | [Streaming](https://docs.litellm.ai/docs/completion/stream#streaming-responses) | [Async Completion](https://docs.litellm.ai/docs/completion/stream#async-completion) | [Async Streaming](https://docs.litellm.ai/docs/completion/stream#async-streaming) | [Async Embedding](https://docs.litellm.ai/docs/embedding/supported_embedding) | [Async Image Generation](https://docs.litellm.ai/docs/image_generation) | -| ----------------------------------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------- | -| [openai](https://docs.litellm.ai/docs/providers/openai) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| [azure](https://docs.litellm.ai/docs/providers/azure) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | -| [aws - sagemaker](https://docs.litellm.ai/docs/providers/aws_sagemaker) | ✅ | ✅ | ✅ | ✅ | ✅ | -| [aws - bedrock](https://docs.litellm.ai/docs/providers/bedrock) | ✅ | ✅ | ✅ | ✅ | ✅ | -| [google - vertex_ai](https://docs.litellm.ai/docs/providers/vertex) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ -| [google - palm](https://docs.litellm.ai/docs/providers/palm) | ✅ | ✅ | ✅ | ✅ | -| [google AI Studio - gemini](https://docs.litellm.ai/docs/providers/gemini) | ✅ | ✅ | ✅ | ✅ | | -| [mistral ai api](https://docs.litellm.ai/docs/providers/mistral) | ✅ | ✅ | ✅ | ✅ | ✅ | -| [cloudflare AI Workers](https://docs.litellm.ai/docs/providers/cloudflare_workers) | ✅ | ✅ | ✅ | ✅ | -| [cohere](https://docs.litellm.ai/docs/providers/cohere) | ✅ | ✅ | ✅ | ✅ | ✅ | -| [anthropic](https://docs.litellm.ai/docs/providers/anthropic) | ✅ | ✅ | ✅ | ✅ | -| [huggingface](https://docs.litellm.ai/docs/providers/huggingface) | ✅ | ✅ | ✅ | ✅ | ✅ | -| [replicate](https://docs.litellm.ai/docs/providers/replicate) | ✅ | ✅ | ✅ | ✅ | -| [together_ai](https://docs.litellm.ai/docs/providers/togetherai) | ✅ | ✅ | ✅ | ✅ | -| [openrouter](https://docs.litellm.ai/docs/providers/openrouter) | ✅ | ✅ | ✅ | ✅ | -| [ai21](https://docs.litellm.ai/docs/providers/ai21) | ✅ | ✅ | ✅ | ✅ | -| [baseten](https://docs.litellm.ai/docs/providers/baseten) | ✅ | ✅ | ✅ | ✅ | -| [vllm](https://docs.litellm.ai/docs/providers/vllm) | ✅ | ✅ | ✅ | ✅ | -| [nlp_cloud](https://docs.litellm.ai/docs/providers/nlp_cloud) | ✅ | ✅ | ✅ | ✅ | -| [aleph alpha](https://docs.litellm.ai/docs/providers/aleph_alpha) | ✅ | ✅ | ✅ | ✅ | -| [petals](https://docs.litellm.ai/docs/providers/petals) | ✅ | ✅ | ✅ | ✅ | -| [ollama](https://docs.litellm.ai/docs/providers/ollama) | ✅ | ✅ | ✅ | ✅ | ✅ | -| [deepinfra](https://docs.litellm.ai/docs/providers/deepinfra) | ✅ | ✅ | ✅ | ✅ | -| [perplexity-ai](https://docs.litellm.ai/docs/providers/perplexity) | ✅ | ✅ | ✅ | ✅ | -| [Groq AI](https://docs.litellm.ai/docs/providers/groq) | ✅ | ✅ | ✅ | ✅ | -| [Deepseek](https://docs.litellm.ai/docs/providers/deepseek) | ✅ | ✅ | ✅ | ✅ | -| [anyscale](https://docs.litellm.ai/docs/providers/anyscale) | ✅ | ✅ | 
✅ | ✅ | -| [IBM - watsonx.ai](https://docs.litellm.ai/docs/providers/watsonx) | ✅ | ✅ | ✅ | ✅ | ✅ -| [voyage ai](https://docs.litellm.ai/docs/providers/voyage) | | | | | ✅ | -| [xinference [Xorbits Inference]](https://docs.litellm.ai/docs/providers/xinference) | | | | | ✅ | +|-------------------------------------------------------------------------------------|---------------------------------------------------------|---------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------|-------------------------------------------------------------------------------|-------------------------------------------------------------------------| +| [openai](https://docs.litellm.ai/docs/providers/openai) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| [azure](https://docs.litellm.ai/docs/providers/azure) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| [aws - sagemaker](https://docs.litellm.ai/docs/providers/aws_sagemaker) | ✅ | ✅ | ✅ | ✅ | ✅ | | +| [aws - bedrock](https://docs.litellm.ai/docs/providers/bedrock) | ✅ | ✅ | ✅ | ✅ | ✅ | | +| [google - vertex_ai](https://docs.litellm.ai/docs/providers/vertex) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | +| [google - palm](https://docs.litellm.ai/docs/providers/palm) | ✅ | ✅ | ✅ | ✅ | | | +| [google AI Studio - gemini](https://docs.litellm.ai/docs/providers/gemini) | ✅ | ✅ | ✅ | ✅ | | | +| [mistral ai api](https://docs.litellm.ai/docs/providers/mistral) | ✅ | ✅ | ✅ | ✅ | ✅ | | +| [cloudflare AI Workers](https://docs.litellm.ai/docs/providers/cloudflare_workers) | ✅ | ✅ | ✅ | ✅ | | | +| [cohere](https://docs.litellm.ai/docs/providers/cohere) | ✅ | ✅ | ✅ | ✅ | ✅ | | +| [anthropic](https://docs.litellm.ai/docs/providers/anthropic) | ✅ | ✅ | ✅ | ✅ | | | +| [huggingface](https://docs.litellm.ai/docs/providers/huggingface) | ✅ | ✅ | ✅ | ✅ | ✅ | | +| [replicate](https://docs.litellm.ai/docs/providers/replicate) | ✅ | ✅ | ✅ | ✅ | | | +| [together_ai](https://docs.litellm.ai/docs/providers/togetherai) | ✅ | ✅ | ✅ | ✅ | | | +| [openrouter](https://docs.litellm.ai/docs/providers/openrouter) | ✅ | ✅ | ✅ | ✅ | | | +| [ai21](https://docs.litellm.ai/docs/providers/ai21) | ✅ | ✅ | ✅ | ✅ | | | +| [baseten](https://docs.litellm.ai/docs/providers/baseten) | ✅ | ✅ | ✅ | ✅ | | | +| [vllm](https://docs.litellm.ai/docs/providers/vllm) | ✅ | ✅ | ✅ | ✅ | | | +| [nlp_cloud](https://docs.litellm.ai/docs/providers/nlp_cloud) | ✅ | ✅ | ✅ | ✅ | | | +| [aleph alpha](https://docs.litellm.ai/docs/providers/aleph_alpha) | ✅ | ✅ | ✅ | ✅ | | | +| [petals](https://docs.litellm.ai/docs/providers/petals) | ✅ | ✅ | ✅ | ✅ | | | +| [ollama](https://docs.litellm.ai/docs/providers/ollama) | ✅ | ✅ | ✅ | ✅ | ✅ | | +| [deepinfra](https://docs.litellm.ai/docs/providers/deepinfra) | ✅ | ✅ | ✅ | ✅ | | | +| [perplexity-ai](https://docs.litellm.ai/docs/providers/perplexity) | ✅ | ✅ | ✅ | ✅ | | | +| [Groq AI](https://docs.litellm.ai/docs/providers/groq) | ✅ | ✅ | ✅ | ✅ | | | +| [Deepseek](https://docs.litellm.ai/docs/providers/deepseek) | ✅ | ✅ | ✅ | ✅ | | | +| [anyscale](https://docs.litellm.ai/docs/providers/anyscale) | ✅ | ✅ | ✅ | ✅ | | | +| [IBM - watsonx.ai](https://docs.litellm.ai/docs/providers/watsonx) | ✅ | ✅ | ✅ | ✅ | ✅ | | +| [voyage ai](https://docs.litellm.ai/docs/providers/voyage) | | | | | ✅ | | +| [xinference [Xorbits Inference]](https://docs.litellm.ai/docs/providers/xinference) | | | | | ✅ | | [**Read the Docs**](https://docs.litellm.ai/docs/) diff --git 
a/docs/my-website/docs/observability/langfuse_integration.md b/docs/my-website/docs/observability/langfuse_integration.md index 6dd5377ea7..07970f599d 100644 --- a/docs/my-website/docs/observability/langfuse_integration.md +++ b/docs/my-website/docs/observability/langfuse_integration.md @@ -144,6 +144,26 @@ print(response) ``` +You can also pass `metadata` as part of the request header with a `langfuse_*` prefix: + +```shell +curl --location 'http://0.0.0.0:4000/chat/completions' \ + --header 'Content-Type: application/json' \ + --header 'langfuse_trace_id: trace-id22' \ + --header 'langfuse_trace_user_id: user-id2' \ + --header 'langfuse_trace_metadata: {"key":"value"}' \ + --data '{ + "model": "gpt-3.5-turbo", + "messages": [ + { + "role": "user", + "content": "what llm are you" + } + ] +}' +``` + + ### Trace & Generation Parameters #### Trace Specific Parameters diff --git a/docs/my-website/docs/providers/groq.md b/docs/my-website/docs/providers/groq.md index da453c3ceb..857aae5bd2 100644 --- a/docs/my-website/docs/providers/groq.md +++ b/docs/my-website/docs/providers/groq.md @@ -46,13 +46,13 @@ for chunk in response: ## Supported Models - ALL Groq Models Supported! We support ALL Groq models, just set `groq/` as a prefix when sending completion requests -| Model Name | Function Call | -|--------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| llama3-8b-8192 | `completion(model="groq/llama3-8b-8192", messages)` | -| llama3-70b-8192 | `completion(model="groq/llama3-70b-8192", messages)` | -| llama2-70b-4096 | `completion(model="groq/llama2-70b-4096", messages)` | +| Model Name | Function Call | +|--------------------|---------------------------------------------------------| +| llama3-8b-8192 | `completion(model="groq/llama3-8b-8192", messages)` | +| llama3-70b-8192 | `completion(model="groq/llama3-70b-8192", messages)` | +| llama2-70b-4096 | `completion(model="groq/llama2-70b-4096", messages)` | | mixtral-8x7b-32768 | `completion(model="groq/mixtral-8x7b-32768", messages)` | -| gemma-7b-it | `completion(model="groq/gemma-7b-it", messages)` | +| gemma-7b-it | `completion(model="groq/gemma-7b-it", messages)` | ## Groq - Tool / Function Calling Example diff --git a/docs/my-website/docs/providers/togetherai.md b/docs/my-website/docs/providers/togetherai.md index d718619f04..1021f5ba84 100644 --- a/docs/my-website/docs/providers/togetherai.md +++ b/docs/my-website/docs/providers/togetherai.md @@ -26,52 +26,52 @@ Example TogetherAI Usage - Note: liteLLM supports all models deployed on Togethe ### Llama LLMs - Chat -| Model Name | Function Call | Required OS Variables | -|-----------------------------------|------------------------------------------------------------------------|---------------------------------| -| togethercomputer/llama-2-70b-chat | `completion('together_ai/togethercomputer/llama-2-70b-chat', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| Model Name | Function Call | Required OS Variables | +|-----------------------------------|-------------------------------------------------------------------------|------------------------------------| +| togethercomputer/llama-2-70b-chat | `completion('together_ai/togethercomputer/llama-2-70b-chat', messages)` | `os.environ['TOGETHERAI_API_KEY']` | ### Llama LLMs - Language / Instruct -| Model Name | Function Call | Required OS Variables | 
-|-----------------------------------|------------------------------------------------------------------------|---------------------------------| -| togethercomputer/llama-2-70b | `completion('together_ai/togethercomputer/llama-2-70b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| togethercomputer/LLaMA-2-7B-32K | `completion('together_ai/togethercomputer/LLaMA-2-7B-32K', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| togethercomputer/Llama-2-7B-32K-Instruct | `completion('together_ai/togethercomputer/Llama-2-7B-32K-Instruct', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| togethercomputer/llama-2-7b | `completion('together_ai/togethercomputer/llama-2-7b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| Model Name | Function Call | Required OS Variables | +|------------------------------------------|--------------------------------------------------------------------------------|------------------------------------| +| togethercomputer/llama-2-70b | `completion('together_ai/togethercomputer/llama-2-70b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| togethercomputer/LLaMA-2-7B-32K | `completion('together_ai/togethercomputer/LLaMA-2-7B-32K', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| togethercomputer/Llama-2-7B-32K-Instruct | `completion('together_ai/togethercomputer/Llama-2-7B-32K-Instruct', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| togethercomputer/llama-2-7b | `completion('together_ai/togethercomputer/llama-2-7b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | ### Falcon LLMs -| Model Name | Function Call | Required OS Variables | -|-----------------------------------|------------------------------------------------------------------------|---------------------------------| -| togethercomputer/falcon-40b-instruct | `completion('together_ai/togethercomputer/falcon-40b-instruct', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| togethercomputer/falcon-7b-instruct | `completion('together_ai/togethercomputer/falcon-7b-instruct', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| Model Name | Function Call | Required OS Variables | +|--------------------------------------|----------------------------------------------------------------------------|------------------------------------| +| togethercomputer/falcon-40b-instruct | `completion('together_ai/togethercomputer/falcon-40b-instruct', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| togethercomputer/falcon-7b-instruct | `completion('together_ai/togethercomputer/falcon-7b-instruct', messages)` | `os.environ['TOGETHERAI_API_KEY']` | ### Alpaca LLMs -| Model Name | Function Call | Required OS Variables | -|-----------------------------------|------------------------------------------------------------------------|---------------------------------| -| togethercomputer/alpaca-7b | `completion('together_ai/togethercomputer/alpaca-7b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| Model Name | Function Call | Required OS Variables | +|----------------------------|------------------------------------------------------------------|------------------------------------| +| togethercomputer/alpaca-7b | `completion('together_ai/togethercomputer/alpaca-7b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | ### Other Chat LLMs -| Model Name | Function Call | Required OS Variables | -|-----------------------------------|------------------------------------------------------------------------|---------------------------------| -| HuggingFaceH4/starchat-alpha | 
`completion('together_ai/HuggingFaceH4/starchat-alpha', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| Model Name | Function Call | Required OS Variables | +|------------------------------|--------------------------------------------------------------------|------------------------------------| +| HuggingFaceH4/starchat-alpha | `completion('together_ai/HuggingFaceH4/starchat-alpha', messages)` | `os.environ['TOGETHERAI_API_KEY']` | ### Code LLMs -| Model Name | Function Call | Required OS Variables | -|-----------------------------------|------------------------------------------------------------------------|---------------------------------| -| togethercomputer/CodeLlama-34b | `completion('together_ai/togethercomputer/CodeLlama-34b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| togethercomputer/CodeLlama-34b-Instruct | `completion('together_ai/togethercomputer/CodeLlama-34b-Instruct', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| togethercomputer/CodeLlama-34b-Python | `completion('together_ai/togethercomputer/CodeLlama-34b-Python', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| defog/sqlcoder | `completion('together_ai/defog/sqlcoder', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| NumbersStation/nsql-llama-2-7B | `completion('together_ai/NumbersStation/nsql-llama-2-7B', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| WizardLM/WizardCoder-15B-V1.0 | `completion('together_ai/WizardLM/WizardCoder-15B-V1.0', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| WizardLM/WizardCoder-Python-34B-V1.0 | `completion('together_ai/WizardLM/WizardCoder-Python-34B-V1.0', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| Model Name | Function Call | Required OS Variables | +|-----------------------------------------|-------------------------------------------------------------------------------|------------------------------------| +| togethercomputer/CodeLlama-34b | `completion('together_ai/togethercomputer/CodeLlama-34b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| togethercomputer/CodeLlama-34b-Instruct | `completion('together_ai/togethercomputer/CodeLlama-34b-Instruct', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| togethercomputer/CodeLlama-34b-Python | `completion('together_ai/togethercomputer/CodeLlama-34b-Python', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| defog/sqlcoder | `completion('together_ai/defog/sqlcoder', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| NumbersStation/nsql-llama-2-7B | `completion('together_ai/NumbersStation/nsql-llama-2-7B', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| WizardLM/WizardCoder-15B-V1.0 | `completion('together_ai/WizardLM/WizardCoder-15B-V1.0', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| WizardLM/WizardCoder-Python-34B-V1.0 | `completion('together_ai/WizardLM/WizardCoder-Python-34B-V1.0', messages)` | `os.environ['TOGETHERAI_API_KEY']` | ### Language LLMs -| Model Name | Function Call | Required OS Variables | -|-----------------------------------|------------------------------------------------------------------------|---------------------------------| -| NousResearch/Nous-Hermes-Llama2-13b | `completion('together_ai/NousResearch/Nous-Hermes-Llama2-13b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| Austism/chronos-hermes-13b | `completion('together_ai/Austism/chronos-hermes-13b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | -| upstage/SOLAR-0-70b-16bit | `completion('together_ai/upstage/SOLAR-0-70b-16bit', messages)` | `os.environ['TOGETHERAI_API_KEY']` 
| -| WizardLM/WizardLM-70B-V1.0 | `completion('together_ai/WizardLM/WizardLM-70B-V1.0', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| Model Name | Function Call | Required OS Variables | +|-------------------------------------|---------------------------------------------------------------------------|------------------------------------| +| NousResearch/Nous-Hermes-Llama2-13b | `completion('together_ai/NousResearch/Nous-Hermes-Llama2-13b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| Austism/chronos-hermes-13b | `completion('together_ai/Austism/chronos-hermes-13b', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| upstage/SOLAR-0-70b-16bit | `completion('together_ai/upstage/SOLAR-0-70b-16bit', messages)` | `os.environ['TOGETHERAI_API_KEY']` | +| WizardLM/WizardLM-70B-V1.0 | `completion('together_ai/WizardLM/WizardLM-70B-V1.0', messages)` | `os.environ['TOGETHERAI_API_KEY']` | ## Prompt Templates diff --git a/docs/my-website/docs/providers/vllm.md b/docs/my-website/docs/providers/vllm.md index c22cd4fc2d..61dd1fffdc 100644 --- a/docs/my-website/docs/providers/vllm.md +++ b/docs/my-website/docs/providers/vllm.md @@ -155,14 +155,14 @@ def default_pt(messages): #### Models we already have Prompt Templates for -| Model Name | Works for Models | Function Call | -| -------- | -------- | -------- | -| meta-llama/Llama-2-7b-chat | All meta-llama llama2 chat models| `completion(model='vllm/meta-llama/Llama-2-7b', messages=messages, api_base="your_api_endpoint")` | -| tiiuae/falcon-7b-instruct | All falcon instruct models | `completion(model='vllm/tiiuae/falcon-7b-instruct', messages=messages, api_base="your_api_endpoint")` | -| mosaicml/mpt-7b-chat | All mpt chat models | `completion(model='vllm/mosaicml/mpt-7b-chat', messages=messages, api_base="your_api_endpoint")` | -| codellama/CodeLlama-34b-Instruct-hf | All codellama instruct models | `completion(model='vllm/codellama/CodeLlama-34b-Instruct-hf', messages=messages, api_base="your_api_endpoint")` | -| WizardLM/WizardCoder-Python-34B-V1.0 | All wizardcoder models | `completion(model='vllm/WizardLM/WizardCoder-Python-34B-V1.0', messages=messages, api_base="your_api_endpoint")` | -| Phind/Phind-CodeLlama-34B-v2 | All phind-codellama models | `completion(model='vllm/Phind/Phind-CodeLlama-34B-v2', messages=messages, api_base="your_api_endpoint")` | +| Model Name | Works for Models | Function Call | +|--------------------------------------|-----------------------------------|------------------------------------------------------------------------------------------------------------------| +| meta-llama/Llama-2-7b-chat | All meta-llama llama2 chat models | `completion(model='vllm/meta-llama/Llama-2-7b', messages=messages, api_base="your_api_endpoint")` | +| tiiuae/falcon-7b-instruct | All falcon instruct models | `completion(model='vllm/tiiuae/falcon-7b-instruct', messages=messages, api_base="your_api_endpoint")` | +| mosaicml/mpt-7b-chat | All mpt chat models | `completion(model='vllm/mosaicml/mpt-7b-chat', messages=messages, api_base="your_api_endpoint")` | +| codellama/CodeLlama-34b-Instruct-hf | All codellama instruct models | `completion(model='vllm/codellama/CodeLlama-34b-Instruct-hf', messages=messages, api_base="your_api_endpoint")` | +| WizardLM/WizardCoder-Python-34B-V1.0 | All wizardcoder models | `completion(model='vllm/WizardLM/WizardCoder-Python-34B-V1.0', messages=messages, api_base="your_api_endpoint")` | +| Phind/Phind-CodeLlama-34B-v2 | All phind-codellama models | 
`completion(model='vllm/Phind/Phind-CodeLlama-34B-v2', messages=messages, api_base="your_api_endpoint")` | #### Custom prompt templates diff --git a/docs/my-website/docs/providers/watsonx.md b/docs/my-website/docs/providers/watsonx.md index d8c5740a86..7a42a54edd 100644 --- a/docs/my-website/docs/providers/watsonx.md +++ b/docs/my-website/docs/providers/watsonx.md @@ -251,23 +251,23 @@ response = completion( Here are some examples of models available in IBM watsonx.ai that you can use with LiteLLM: -| Mode Name | Command | -| ---------- | --------- | -| Flan T5 XXL | `completion(model=watsonx/google/flan-t5-xxl, messages=messages)` | -| Flan Ul2 | `completion(model=watsonx/google/flan-ul2, messages=messages)` | -| Mt0 XXL | `completion(model=watsonx/bigscience/mt0-xxl, messages=messages)` | -| Gpt Neox | `completion(model=watsonx/eleutherai/gpt-neox-20b, messages=messages)` | -| Mpt 7B Instruct2 | `completion(model=watsonx/ibm/mpt-7b-instruct2, messages=messages)` | -| Starcoder | `completion(model=watsonx/bigcode/starcoder, messages=messages)` | -| Llama 2 70B Chat | `completion(model=watsonx/meta-llama/llama-2-70b-chat, messages=messages)` | -| Llama 2 13B Chat | `completion(model=watsonx/meta-llama/llama-2-13b-chat, messages=messages)` | -| Granite 13B Instruct | `completion(model=watsonx/ibm/granite-13b-instruct-v1, messages=messages)` | -| Granite 13B Chat | `completion(model=watsonx/ibm/granite-13b-chat-v1, messages=messages)` | -| Flan T5 XL | `completion(model=watsonx/google/flan-t5-xl, messages=messages)` | -| Granite 13B Chat V2 | `completion(model=watsonx/ibm/granite-13b-chat-v2, messages=messages)` | -| Granite 13B Instruct V2 | `completion(model=watsonx/ibm/granite-13b-instruct-v2, messages=messages)` | -| Elyza Japanese Llama 2 7B Instruct | `completion(model=watsonx/elyza/elyza-japanese-llama-2-7b-instruct, messages=messages)` | -| Mixtral 8X7B Instruct V01 Q | `completion(model=watsonx/ibm-mistralai/mixtral-8x7b-instruct-v01-q, messages=messages)` | +| Model Name | Command | +|------------------------------------|------------------------------------------------------------------------------------------| +| Flan T5 XXL | `completion(model=watsonx/google/flan-t5-xxl, messages=messages)` | +| Flan Ul2 | `completion(model=watsonx/google/flan-ul2, messages=messages)` | +| Mt0 XXL | `completion(model=watsonx/bigscience/mt0-xxl, messages=messages)` | +| Gpt Neox | `completion(model=watsonx/eleutherai/gpt-neox-20b, messages=messages)` | +| Mpt 7B Instruct2 | `completion(model=watsonx/ibm/mpt-7b-instruct2, messages=messages)` | +| Starcoder | `completion(model=watsonx/bigcode/starcoder, messages=messages)` | +| Llama 2 70B Chat | `completion(model=watsonx/meta-llama/llama-2-70b-chat, messages=messages)` | +| Llama 2 13B Chat | `completion(model=watsonx/meta-llama/llama-2-13b-chat, messages=messages)` | +| Granite 13B Instruct | `completion(model=watsonx/ibm/granite-13b-instruct-v1, messages=messages)` | +| Granite 13B Chat | `completion(model=watsonx/ibm/granite-13b-chat-v1, messages=messages)` | +| Flan T5 XL | `completion(model=watsonx/google/flan-t5-xl, messages=messages)` | +| Granite 13B Chat V2 | `completion(model=watsonx/ibm/granite-13b-chat-v2, messages=messages)` | +| Granite 13B Instruct V2 | `completion(model=watsonx/ibm/granite-13b-instruct-v2, messages=messages)` | +| Elyza Japanese Llama 2 7B Instruct | `completion(model=watsonx/elyza/elyza-japanese-llama-2-7b-instruct, messages=messages)` | +| Mixtral 8X7B Instruct V01 Q | 
`completion(model=watsonx/ibm-mistralai/mixtral-8x7b-instruct-v01-q, messages=messages)` | For a list of all available models in watsonx.ai, see [here](https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/fm-models.html?context=wx&locale=en&audience=wdp). @@ -275,10 +275,10 @@ For a list of all available models in watsonx.ai, see [here](https://dataplatfor ## Supported IBM watsonx.ai Embedding Models -| Model Name | Function Call | -|----------------------|---------------------------------------------| -| Slate 30m | `embedding(model="watsonx/ibm/slate-30m-english-rtrvr", input=input)` | -| Slate 125m | `embedding(model="watsonx/ibm/slate-125m-english-rtrvr", input=input)` | +| Model Name | Function Call | +|------------|------------------------------------------------------------------------| +| Slate 30m | `embedding(model="watsonx/ibm/slate-30m-english-rtrvr", input=input)` | +| Slate 125m | `embedding(model="watsonx/ibm/slate-125m-english-rtrvr", input=input)` | For a list of all available embedding models in watsonx.ai, see [here](https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/fm-models-embed.html?context=wx). \ No newline at end of file diff --git a/docs/my-website/docs/providers/xinference.md b/docs/my-website/docs/providers/xinference.md index 3c927dcb43..3686c02098 100644 --- a/docs/my-website/docs/providers/xinference.md +++ b/docs/my-website/docs/providers/xinference.md @@ -37,26 +37,26 @@ print(response) ## Supported Models All models listed here https://inference.readthedocs.io/en/latest/models/builtin/embedding/index.html are supported -| Model Name | Function Call | -|------------------------------|--------------------------------------------------------| -| bge-base-en | `embedding(model="xinference/bge-base-en", input)` | -| bge-base-en-v1.5 | `embedding(model="xinference/bge-base-en-v1.5", input)` | -| bge-base-zh | `embedding(model="xinference/bge-base-zh", input)` | -| bge-base-zh-v1.5 | `embedding(model="xinference/bge-base-zh-v1.5", input)` | -| bge-large-en | `embedding(model="xinference/bge-large-en", input)` | -| bge-large-en-v1.5 | `embedding(model="xinference/bge-large-en-v1.5", input)` | -| bge-large-zh | `embedding(model="xinference/bge-large-zh", input)` | -| bge-large-zh-noinstruct | `embedding(model="xinference/bge-large-zh-noinstruct", input)` | -| bge-large-zh-v1.5 | `embedding(model="xinference/bge-large-zh-v1.5", input)` | -| bge-small-en-v1.5 | `embedding(model="xinference/bge-small-en-v1.5", input)` | -| bge-small-zh | `embedding(model="xinference/bge-small-zh", input)` | -| bge-small-zh-v1.5 | `embedding(model="xinference/bge-small-zh-v1.5", input)` | -| e5-large-v2 | `embedding(model="xinference/e5-large-v2", input)` | -| gte-base | `embedding(model="xinference/gte-base", input)` | -| gte-large | `embedding(model="xinference/gte-large", input)` | -| jina-embeddings-v2-base-en | `embedding(model="xinference/jina-embeddings-v2-base-en", input)` | -| jina-embeddings-v2-small-en | `embedding(model="xinference/jina-embeddings-v2-small-en", input)` | -| multilingual-e5-large | `embedding(model="xinference/multilingual-e5-large", input)` | +| Model Name | Function Call | +|-----------------------------|--------------------------------------------------------------------| +| bge-base-en | `embedding(model="xinference/bge-base-en", input)` | +| bge-base-en-v1.5 | `embedding(model="xinference/bge-base-en-v1.5", input)` | +| bge-base-zh | `embedding(model="xinference/bge-base-zh", input)` | +| bge-base-zh-v1.5 | 
`embedding(model="xinference/bge-base-zh-v1.5", input)` | +| bge-large-en | `embedding(model="xinference/bge-large-en", input)` | +| bge-large-en-v1.5 | `embedding(model="xinference/bge-large-en-v1.5", input)` | +| bge-large-zh | `embedding(model="xinference/bge-large-zh", input)` | +| bge-large-zh-noinstruct | `embedding(model="xinference/bge-large-zh-noinstruct", input)` | +| bge-large-zh-v1.5 | `embedding(model="xinference/bge-large-zh-v1.5", input)` | +| bge-small-en-v1.5 | `embedding(model="xinference/bge-small-en-v1.5", input)` | +| bge-small-zh | `embedding(model="xinference/bge-small-zh", input)` | +| bge-small-zh-v1.5 | `embedding(model="xinference/bge-small-zh-v1.5", input)` | +| e5-large-v2 | `embedding(model="xinference/e5-large-v2", input)` | +| gte-base | `embedding(model="xinference/gte-base", input)` | +| gte-large | `embedding(model="xinference/gte-large", input)` | +| jina-embeddings-v2-base-en | `embedding(model="xinference/jina-embeddings-v2-base-en", input)` | +| jina-embeddings-v2-small-en | `embedding(model="xinference/jina-embeddings-v2-small-en", input)` | +| multilingual-e5-large | `embedding(model="xinference/multilingual-e5-large", input)` | diff --git a/docs/my-website/docs/proxy/deploy.md b/docs/my-website/docs/proxy/deploy.md index 6fb8c5bfe3..b756f56e2e 100644 --- a/docs/my-website/docs/proxy/deploy.md +++ b/docs/my-website/docs/proxy/deploy.md @@ -260,7 +260,7 @@ Requirements: -We maintain a [seperate Dockerfile](https://github.com/BerriAI/litellm/pkgs/container/litellm-database) for reducing build time when running LiteLLM proxy with a connected Postgres Database +We maintain a [separate Dockerfile](https://github.com/BerriAI/litellm/pkgs/container/litellm-database) for reducing build time when running LiteLLM proxy with a connected Postgres Database ```shell docker pull ghcr.io/berriai/litellm-database:main-latest diff --git a/docs/my-website/docs/proxy/enterprise.md b/docs/my-website/docs/proxy/enterprise.md index e52a19162e..2b984b3e78 100644 --- a/docs/my-website/docs/proxy/enterprise.md +++ b/docs/my-website/docs/proxy/enterprise.md @@ -459,7 +459,7 @@ Step 1 Set a `LAKERA_API_KEY` in your env LAKERA_API_KEY="7a91a1a6059da*******" ``` -Step 2. Add `lakera_prompt_injection` to your calbacks +Step 2. 
Add `lakera_prompt_injection` to your callbacks ```yaml litellm_settings: diff --git a/docs/my-website/docs/proxy/logging.md b/docs/my-website/docs/proxy/logging.md index 692d69d29f..fd57545ca9 100644 --- a/docs/my-website/docs/proxy/logging.md +++ b/docs/my-website/docs/proxy/logging.md @@ -41,7 +41,9 @@ litellm_settings: **Step 3**: Set required env variables for logging to langfuse ```shell export LANGFUSE_PUBLIC_KEY="pk_kk" -export LANGFUSE_SECRET_KEY="sk_ss +export LANGFUSE_SECRET_KEY="sk_ss" +# Optional, defaults to https://cloud.langfuse.com +export LANGFUSE_HOST="https://xxx.langfuse.com" ``` **Step 4**: Start the proxy, make a test request diff --git a/docs/my-website/docs/tutorials/finetuned_chat_gpt.md b/docs/my-website/docs/tutorials/finetuned_chat_gpt.md index 641c45b5f8..5dde3b3ff9 100644 --- a/docs/my-website/docs/tutorials/finetuned_chat_gpt.md +++ b/docs/my-website/docs/tutorials/finetuned_chat_gpt.md @@ -1,8 +1,8 @@ # Using Fine-Tuned gpt-3.5-turbo LiteLLM allows you to call `completion` with your fine-tuned gpt-3.5-turbo models -If you're trying to create your custom finetuned gpt-3.5-turbo model following along on this tutorial: https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset +If you're trying to create your custom fine-tuned gpt-3.5-turbo model, follow along with this tutorial: https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset -Once you've created your fine tuned model, you can call it with `litellm.completion()` +Once you've created your fine-tuned model, you can call it with `litellm.completion()` ## Usage ```python diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index 4d580f6666..80d90f38aa 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -69,6 +69,43 @@ class LangFuseLogger: else: self.upstream_langfuse = None + @staticmethod + def add_metadata_from_header(litellm_params: dict, metadata: dict) -> dict: + """ + Adds metadata from proxy request headers to Langfuse logging if keys start with "langfuse_", + overwriting any matching keys already present in litellm_params.metadata. + + For example, if you want to append your trace to an existing `trace_id` via a header, send + `headers: { ..., langfuse_existing_trace_id: your-existing-trace-id }` via the proxy request. 
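+ + The `langfuse_` prefix is stripped once, so the header `langfuse_trace_user_id` becomes the metadata key `trace_user_id`. A warning is logged whenever an existing metadata key is overwritten by a header value.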
+ """ + if litellm_params is None: + return metadata + + if litellm_params.get("proxy_server_request") is None: + return metadata + + if metadata is None: + metadata = {} + + proxy_headers = ( + litellm_params.get("proxy_server_request", {}).get("headers", {}) or {} + ) + + for metadata_param_key in proxy_headers: + if metadata_param_key.startswith("langfuse_"): + trace_param_key = metadata_param_key.replace("langfuse_", "", 1) + if trace_param_key in metadata: + verbose_logger.warning( + f"Overwriting Langfuse `{trace_param_key}` from request header" + ) + else: + verbose_logger.debug( + f"Found Langfuse `{trace_param_key}` in request header" + ) + metadata[trace_param_key] = proxy_headers.get(metadata_param_key) + + return metadata + # def log_error(kwargs, response_obj, start_time, end_time): # generation = trace.generation( # level ="ERROR" # can be any of DEBUG, DEFAULT, WARNING or ERROR @@ -97,6 +134,7 @@ class LangFuseLogger: metadata = ( litellm_params.get("metadata", {}) or {} ) # if litellm_params['metadata'] == None + metadata = self.add_metadata_from_header(litellm_params, metadata) optional_params = copy.deepcopy(kwargs.get("optional_params", {})) prompt = {"messages": kwargs.get("messages")} diff --git a/litellm/llms/ollama.py b/litellm/llms/ollama.py index 283878056f..76687839d8 100644 --- a/litellm/llms/ollama.py +++ b/litellm/llms/ollama.py @@ -2,8 +2,9 @@ from itertools import chain import requests, types, time # type: ignore import json, uuid import traceback -from typing import Optional +from typing import Optional, List import litellm +from litellm.types.utils import ProviderField import httpx, aiohttp, asyncio # type: ignore from .prompt_templates.factory import prompt_factory, custom_prompt @@ -124,6 +125,18 @@ class OllamaConfig: ) and v is not None } + + def get_required_params(self) -> List[ProviderField]: + """For a given provider, return it's required fields with a description""" + return [ + ProviderField( + field_name="base_url", + field_type="string", + field_description="Your Ollama API Base", + field_value="http://10.10.11.249:11434", + ) + ] + def get_supported_openai_params( self, ): diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index 41ecb486ce..10f3f16ed4 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -833,7 +833,7 @@ def anthropic_messages_pt_xml(messages: list): ) # either string or none if messages[msg_i].get( "tool_calls", [] - ): # support assistant tool invoke convertion + ): # support assistant tool invoke conversion assistant_text += convert_to_anthropic_tool_invoke_xml( # type: ignore messages[msg_i]["tool_calls"] ) @@ -1224,7 +1224,7 @@ def anthropic_messages_pt(messages: list): if messages[msg_i].get( "tool_calls", [] - ): # support assistant tool invoke convertion + ): # support assistant tool invoke conversion assistant_content.extend( convert_to_anthropic_tool_invoke(messages[msg_i]["tool_calls"]) ) diff --git a/litellm/llms/vertex_ai.py b/litellm/llms/vertex_ai.py index 5171b1efcc..dc79e7e4e9 100644 --- a/litellm/llms/vertex_ai.py +++ b/litellm/llms/vertex_ai.py @@ -297,24 +297,29 @@ def _convert_gemini_role(role: str) -> Literal["user", "model"]: def _process_gemini_image(image_url: str) -> PartType: try: - if "gs://" in image_url: - # Case 1: Images with Cloud Storage URIs + if ".mp4" in image_url and "gs://" in image_url: + # Case 1: Videos with Cloud Storage URIs + part_mime = "video/mp4" + _file_data = 
FileDataType(mime_type=part_mime, file_uri=image_url) + return PartType(file_data=_file_data) + elif ".pdf" in image_url and "gs://" in image_url: + # Case 2: PDFs with Cloud Storage URIs + part_mime = "application/pdf" + _file_data = FileDataType(mime_type=part_mime, file_uri=image_url) + return PartType(file_data=_file_data) + elif "gs://" in image_url: + # Case 3: Images with Cloud Storage URIs # The supported MIME types for images include image/png and image/jpeg. part_mime = "image/png" if "png" in image_url else "image/jpeg" _file_data = FileDataType(mime_type=part_mime, file_uri=image_url) return PartType(file_data=_file_data) elif "https:/" in image_url: - # Case 2: Images with direct links + # Case 4: Images with direct links image = _load_image_from_url(image_url) _blob = BlobType(data=image.data, mime_type=image._mime_type) return PartType(inline_data=_blob) - elif ".mp4" in image_url and "gs://" in image_url: - # Case 3: Videos with Cloud Storage URIs - part_mime = "video/mp4" - _file_data = FileDataType(mime_type=part_mime, file_uri=image_url) - return PartType(file_data=_file_data) elif "base64" in image_url: - # Case 4: Images with base64 encoding + # Case 5: Images with base64 encoding import base64, re # base 64 is passed as data:image/jpeg;base64, @@ -390,7 +395,7 @@ def _gemini_convert_messages_with_history(messages: list) -> List[ContentType]: assistant_content.extend(_parts) elif messages[msg_i].get( "tool_calls", [] - ): # support assistant tool invoke convertion + ): # support assistant tool invoke conversion assistant_content.extend( convert_to_gemini_tool_call_invoke(messages[msg_i]["tool_calls"]) ) diff --git a/litellm/utils.py b/litellm/utils.py index a1ef3352b2..2c3f884e9c 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -7352,6 +7352,10 @@ def get_provider_fields(custom_llm_provider: str) -> List[ProviderField]: if custom_llm_provider == "databricks": return litellm.DatabricksConfig().get_required_params() + + elif custom_llm_provider == "ollama": + return litellm.OllamaConfig().get_required_params() + else: return [] diff --git a/poetry.lock b/poetry.lock index ed4c4138d7..64896fdb82 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "aiohttp" @@ -2114,6 +2114,7 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -2121,8 +2122,15 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -2139,6 +2147,7 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -2146,6 +2155,7 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -3140,4 +3150,4 @@ proxy = ["PyJWT", "apscheduler", "backoff", "cryptography", "fastapi", "fastapi- [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0, !=3.9.7" -content-hash = "a54d969a1a707413e7cd3ce869d14ef73dd41bb9d36ebf0fb878d9e929bc15b3" +content-hash = "6a37992b63b11d254f5f40687bd96898b1d9515728f663f30dcc81c4ef8df7b7" diff --git a/pyproject.toml b/pyproject.toml index 9859bf1b2a..648a8b41ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,7 +62,8 @@ extra_proxy = [ "azure-identity", "azure-keyvault-secrets", "google-cloud-kms", - "resend" + "resend", + "pynacl" ] [tool.poetry.scripts] diff --git a/ui/litellm-dashboard/src/components/model_dashboard.tsx b/ui/litellm-dashboard/src/components/model_dashboard.tsx index adb45346b7..73e5a7a8f1 100644 --- a/ui/litellm-dashboard/src/components/model_dashboard.tsx +++ b/ui/litellm-dashboard/src/components/model_dashboard.tsx @@ -145,6 +145,7 @@ enum Providers { OpenAI_Compatible = "OpenAI-Compatible Endpoints (Groq, Together AI, Mistral AI, etc.)", Vertex_AI = "Vertex AI (Anthropic, Gemini, etc.)", Databricks = "Databricks", + Ollama = "Ollama", } const provider_map: Record = { @@ -156,6 +157,7 @@ const provider_map: Record = { OpenAI_Compatible: "openai", Vertex_AI: "vertex_ai", Databricks: "databricks", + Ollama: "ollama", }; const retry_policy_map: Record = { @@ -1747,6 +1749,7 @@ const ModelDashboard: React.FC = ({ )} {selectedProvider != Providers.Bedrock && selectedProvider != Providers.Vertex_AI && + selectedProvider != Providers.Ollama && (dynamicProviderForm === 
undefined || dynamicProviderForm.fields.length == 0) && (
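For illustration, here is a minimal sketch of how the Langfuse header-forwarding helper added above behaves. The header values are hypothetical, and it assumes this patch plus the `langfuse` SDK are installed so the import resolves:

```python
# Minimal sketch: map "langfuse_"-prefixed proxy headers to Langfuse metadata.
from litellm.integrations.langfuse import LangFuseLogger

litellm_params = {
    "proxy_server_request": {
        "headers": {
            "content-type": "application/json",  # ignored: no "langfuse_" prefix
            "langfuse_trace_id": "trace-id22",  # becomes metadata["trace_id"]
            "langfuse_trace_user_id": "user-id2",  # becomes metadata["trace_user_id"]
        }
    }
}

# add_metadata_from_header is a @staticmethod, so no LangFuseLogger instance
# (and therefore no Langfuse API key) is needed to trace its behavior.
metadata = LangFuseLogger.add_metadata_from_header(litellm_params, metadata={})
print(metadata)  # {'trace_id': 'trace-id22', 'trace_user_id': 'user-id2'}
```

These are the same header names exercised by the curl example added to langfuse_integration.md above.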