litellm-mirror/litellm/llms

Latest commit a671046b45 by Krish Dholakia, 2024-05-09 22:21:16 -07:00
Merge pull request #3552 from BerriAI/litellm_predibase_support
feat(predibase.py): add support for predibase provider
Name                        Last commit message                                                 Last commit date
custom_httpx/               fix(http_handler.py): fix linting error                             2024-04-19 15:45:24 -07:00
huggingface_llms_metadata/  add hf tgi and conversational models                                2023-09-27 15:56:45 -07:00
prompt_templates/           Revert "Add support for async streaming to watsonx provider"        2024-05-09 07:44:15 -07:00
tokenizers/                 fix(openai.py): return logprobs for text completion calls           2024-04-02 14:05:56 -07:00
__init__.py                 add linting                                                         2023-08-18 11:05:05 -07:00
ai21.py                     feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
aleph_alpha.py              feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
anthropic.py                feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
anthropic_text.py           fix(utils.py): fix streaming to not return usage dict               2024-04-24 08:06:07 -07:00
azure.py                    Merge branch 'main' into litellm_region_based_routing               2024-05-08 22:19:51 -07:00
azure_text.py               feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
base.py                     build(pyproject.toml): drop certifi dependency (unused)             2024-01-10 08:09:03 +05:30
baseten.py                  feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
bedrock.py                  feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
cloudflare.py               feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
cohere.py                   feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
cohere_chat.py              feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
gemini.py                   fix(utils.py): fix streaming to not return usage dict               2024-04-24 08:06:07 -07:00
huggingface_restapi.py      fix: fix linting errors                                             2024-05-09 17:55:27 -07:00
maritalk.py                 feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
nlp_cloud.py                feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
ollama.py                   feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
ollama_chat.py              Make newline same in async function                                 2024-05-05 18:51:53 -07:00
oobabooga.py                feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
openai.py                   stream_options for text-completion openai                           2024-05-09 08:37:40 -07:00
openrouter.py               refactor: add black formatting                                      2023-12-25 14:11:20 +05:30
palm.py                     fix(utils.py): fix streaming to not return usage dict               2024-04-24 08:06:07 -07:00
petals.py                   feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
predibase.py                fix(predibase.py): fix async streaming                              2024-05-09 22:18:16 -07:00
replicate.py                feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
sagemaker.py                feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
together_ai.py              feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
vertex_ai.py                feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
vertex_ai_anthropic.py      feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
vllm.py                     feat(proxy_server.py): return litellm version in response headers   2024-05-08 16:00:08 -07:00
watsonx.py                  Revert "Add support for async streaming to watsonx provider"        2024-05-09 07:44:15 -07:00
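Each module in this directory implements one provider backend behind litellm's unified completion() entry point, where the prefix on the model string selects which module handles the call. Below is a minimal sketch of that calling convention using the newly merged predibase provider; the model name, tenant_id value, and API key are illustrative assumptions, not details taken from this listing.

    # Minimal sketch: routing a request to llms/predibase.py via litellm's
    # provider-prefix convention. The model name, tenant_id, and key values
    # below are assumptions for illustration only.
    import os
    import litellm

    os.environ["PREDIBASE_API_KEY"] = "pb_..."  # hypothetical placeholder key

    response = litellm.completion(
        model="predibase/llama-3-8b-instruct",  # "predibase/" prefix selects the provider module
        tenant_id="my-tenant",                   # assumed Predibase-specific parameter
        messages=[{"role": "user", "content": "Hello, world"}],
    )
    print(response.choices[0].message.content)

The same pattern applies to the other modules here: swapping the prefix (e.g. "ollama/", "bedrock/") routes the request to the corresponding file without changing the calling code, which is what lets repo-wide changes like "return litellm version in response headers" touch every provider at once.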