diff --git a/README.md b/README.md
index 8dc045be93..8b2f9cce3c 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,8 @@ LiteLLM manages:
[**Jump to OpenAI Proxy Docs**](https://github.com/BerriAI/litellm?tab=readme-ov-file#openai-proxy---docs)
[**Jump to Supported LLM Providers**](https://github.com/BerriAI/litellm?tab=readme-ov-file#supported-provider-docs)
+Support for more providers. Missing a provider or LLM Platform? Raise a [feature request](https://github.com/BerriAI/litellm/issues/new?assignees=&labels=enhancement&projects=&template=feature_request.yml&title=%5BFeature%5D%3A+).
+
# Usage ([**Docs**](https://docs.litellm.ai/docs/))
> [!IMPORTANT]
> LiteLLM v1.0.0 now requires `openai>=1.0.0`. Migration guide [here](https://docs.litellm.ai/docs/migration)
diff --git a/docs/my-website/docs/proxy/quick_start.md b/docs/my-website/docs/proxy/quick_start.md
index f6513e2b87..4f508ee592 100644
--- a/docs/my-website/docs/proxy/quick_start.md
+++ b/docs/my-website/docs/proxy/quick_start.md
@@ -370,12 +370,12 @@ See the latest available ghcr docker image here:
https://github.com/berriai/litellm/pkgs/container/litellm
```shell
-docker pull ghcr.io/berriai/litellm:main-v1.16.13
+docker pull ghcr.io/berriai/litellm:main-latest
```
### Run the Docker Image
```shell
-docker run ghcr.io/berriai/litellm:main-v1.16.13
+docker run ghcr.io/berriai/litellm:main-latest
```
#### Run the Docker Image with LiteLLM CLI args
@@ -384,12 +384,12 @@ See all supported CLI args [here](https://docs.litellm.ai/docs/proxy/cli):
Here's how you can run the docker image and pass your config to `litellm`
```shell
-docker run ghcr.io/berriai/litellm:main-v1.16.13 --config your_config.yaml
+docker run ghcr.io/berriai/litellm:main-latest --config your_config.yaml
```
Here's how you can run the docker image and start litellm on port 8002 with `num_workers=8`
```shell
-docker run ghcr.io/berriai/litellm:main-v1.16.13 --port 8002 --num_workers 8
+docker run ghcr.io/berriai/litellm:main-latest --port 8002 --num_workers 8
```
#### Run the Docker Image using docker compose
diff --git a/docs/my-website/docs/proxy/virtual_keys.md b/docs/my-website/docs/proxy/virtual_keys.md
index 28842e5e25..2be4b95c1f 100644
--- a/docs/my-website/docs/proxy/virtual_keys.md
+++ b/docs/my-website/docs/proxy/virtual_keys.md
@@ -696,7 +696,9 @@ general_settings:
"region_name": "us-west-2"
"user_table_name": "your-user-table",
"key_table_name": "your-token-table",
- "config_table_name": "your-config-table"
+ "config_table_name": "your-config-table",
+ "aws_role_name": "your-aws_role_name",
+    "aws_session_name": "your-aws_session_name"
}
```
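For context on the two new `database_args` keys: role-based access to the DynamoDB tables is typically obtained by assuming the named IAM role via STS. A minimal boto3 sketch of that flow (illustrative only, not LiteLLM's exact internals; the ARN below is a hypothetical one built from `aws_role_name`):

```python
import boto3

# Assume the configured role; the session name shows up in CloudTrail.
sts = boto3.client("sts", region_name="us-west-2")
creds = sts.assume_role(
    RoleArn="arn:aws:iam::123456789012:role/your-aws_role_name",  # hypothetical ARN
    RoleSessionName="your-aws_session_name",
)["Credentials"]

# Use the temporary credentials against the tables configured above.
dynamodb = boto3.client(
    "dynamodb",
    region_name="us-west-2",
    aws_access_key_id=creds["AccessKeyId"],
    aws_secret_access_key=creds["SecretAccessKey"],
    aws_session_token=creds["SessionToken"],
)
```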
diff --git a/litellm/caching.py b/litellm/caching.py
index f0ae7778af..5649720680 100644
--- a/litellm/caching.py
+++ b/litellm/caching.py
@@ -675,6 +675,9 @@ class S3Cache(BaseCache):
def flush_cache(self):
pass
+ async def disconnect(self):
+ pass
+
class DualCache(BaseCache):
"""
diff --git a/litellm/integrations/custom_logger.py b/litellm/integrations/custom_logger.py
index d0cdd7702a..061a339626 100644
--- a/litellm/integrations/custom_logger.py
+++ b/litellm/integrations/custom_logger.py
@@ -70,7 +70,6 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac
user_api_key_dict: UserAPIKeyAuth,
response,
):
- pass
#### SINGLE-USE #### - https://docs.litellm.ai/docs/observability/custom_callback#using-your-custom-callback-function
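For reference, the callback class these hooks live on is wired up roughly like this (a minimal sketch following the linked custom_callback docs; treat the exact hook signature as an assumption):

```python
import litellm
from litellm.integrations.custom_logger import CustomLogger

class MyLogger(CustomLogger):
    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        # Runs after every successful completion call.
        print("model:", kwargs.get("model"), "latency:", end_time - start_time)

litellm.callbacks = [MyLogger()]
```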
diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py
index ae076fdf02..b7f1c50236 100644
--- a/litellm/llms/bedrock.py
+++ b/litellm/llms/bedrock.py
@@ -477,8 +477,8 @@ def init_bedrock_client(
def convert_messages_to_prompt(model, messages, provider, custom_prompt_dict):
- # handle anthropic prompts using anthropic constants
- if provider == "anthropic":
+ # handle anthropic prompts and amazon titan prompts
+ if provider == "anthropic" or provider == "amazon":
if model in custom_prompt_dict:
# check if the model has a registered custom prompt
model_prompt_details = custom_prompt_dict[model]
@@ -490,7 +490,7 @@ def convert_messages_to_prompt(model, messages, provider, custom_prompt_dict):
)
else:
prompt = prompt_factory(
- model=model, messages=messages, custom_llm_provider="anthropic"
+ model=model, messages=messages, custom_llm_provider="bedrock"
)
else:
prompt = ""
@@ -623,6 +623,7 @@ def completion(
"textGenerationConfig": inference_params,
}
)
+
else:
data = json.dumps({})
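The first hunk above widens `convert_messages_to_prompt` so Amazon Titan models also get a templated prompt. An illustrative call (the model id is just an example Titan identifier):

```python
prompt = convert_messages_to_prompt(
    model="amazon.titan-text-express-v1",  # example model id
    messages=[{"role": "user", "content": "Hello"}],
    provider="amazon",
    custom_prompt_dict={},
)
# With provider == "amazon", messages now flow through prompt_factory
# (custom_llm_provider="bedrock") instead of falling through to prompt = "".
```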
diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index 6321860cc5..7896d7c969 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -90,9 +90,11 @@ def ollama_pt(
return {"prompt": prompt, "images": images}
else:
prompt = "".join(
- m["content"]
- if isinstance(m["content"], str) is str
- else "".join(m["content"])
+            (
+                m["content"]
+                if isinstance(m["content"], str)
+                else "".join(m["content"])
+            )
for m in messages
)
return prompt
@@ -422,6 +424,34 @@ def anthropic_pt(
return prompt
+def amazon_titan_pt(
+ messages: list,
+): # format - https://github.com/BerriAI/litellm/issues/1896
+ """
+    Amazon Titan uses 'User:' and 'Bot:' in its prompt template
+ """
+
+ class AmazonTitanConstants(Enum):
+ HUMAN_PROMPT = "\n\nUser: " # Assuming this is similar to Anthropic prompt formatting, since amazon titan's prompt formatting is currently undocumented
+ AI_PROMPT = "\n\nBot: "
+
+ prompt = ""
+ for idx, message in enumerate(messages):
+ if message["role"] == "user":
+ prompt += f"{AmazonTitanConstants.HUMAN_PROMPT.value}{message['content']}"
+ elif message["role"] == "system":
+            prompt += f"{AmazonTitanConstants.HUMAN_PROMPT.value}<admin>{message['content']}</admin>"
+        else:
+            prompt += f"{AmazonTitanConstants.AI_PROMPT.value}{message['content']}"
+        if (
+            idx == 0 and message["role"] == "assistant"
+        ):  # ensure the prompt always starts with `\n\nUser: `
+            prompt = f"{AmazonTitanConstants.HUMAN_PROMPT.value}" + prompt
+    if messages[-1]["role"] != "assistant":
+        prompt += f"{AmazonTitanConstants.AI_PROMPT.value}"
+    return prompt
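A quick check of what the new template produces for a short chat (based on the `User:`/`Bot:` constants above):

```python
messages = [
    {"role": "user", "content": "Hi!"},
    {"role": "assistant", "content": "Hello! How can I help?"},
    {"role": "user", "content": "Tell me a joke."},
]
print(repr(amazon_titan_pt(messages)))
# '\n\nUser: Hi!\n\nBot: Hello! How can I help?\n\nUser: Tell me a joke.\n\nBot: '
```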