litellm-mirror/litellm/llms — directory listing as of 2024-05-09 20:25:30 +02:00. Each entry shows the last commit that touched it.

| Name | Last commit message | Last commit date |
| ---- | ------------------- | ---------------- |
| `custom_httpx/` | fix(http_handler.py): fix linting error | 2024-04-19 15:45:24 -07:00 |
| `huggingface_llms_metadata/` | add hf tgi and conversational models | 2023-09-27 15:56:45 -07:00 |
| `prompt_templates/` | Revert "Add support for async streaming to watsonx provider " | 2024-05-09 07:44:15 -07:00 |
| `tokenizers/` | fix(openai.py): return logprobs for text completion calls | 2024-04-02 14:05:56 -07:00 |
| `__init__.py` | add linting | 2023-08-18 11:05:05 -07:00 |
| `ai21.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `aleph_alpha.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `anthropic.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `anthropic_text.py` | fix(utils.py): fix streaming to not return usage dict | 2024-04-24 08:06:07 -07:00 |
| `azure.py` | Merge branch 'main' into litellm_region_based_routing | 2024-05-08 22:19:51 -07:00 |
| `azure_text.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `base.py` | build(pyproject.toml): drop certifi dependency (unused) | 2024-01-10 08:09:03 +05:30 |
| `baseten.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `bedrock.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `cloudflare.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `cohere.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `cohere_chat.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `gemini.py` | fix(utils.py): fix streaming to not return usage dict | 2024-04-24 08:06:07 -07:00 |
| `huggingface_restapi.py` | fix(huggingface_restapi.py): fix hf streaming issue | 2024-03-04 21:16:41 -08:00 |
| `maritalk.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `nlp_cloud.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `ollama.py` | Merge branch 'BerriAI:main' into ollama-image-handling | 2024-05-09 20:25:30 +02:00 |
| `ollama_chat.py` | Make newline same in async function | 2024-05-05 18:51:53 -07:00 |
| `oobabooga.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `openai.py` | support stream_options for chat completion models | 2024-05-08 21:52:25 -07:00 |
| `openrouter.py` | refactor: add black formatting | 2023-12-25 14:11:20 +05:30 |
| `palm.py` | fix(utils.py): fix streaming to not return usage dict | 2024-04-24 08:06:07 -07:00 |
| `petals.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `replicate.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `sagemaker.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `together_ai.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `vertex_ai.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `vertex_ai_anthropic.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `vllm.py` | feat(proxy_server.py): return litellm version in response headers | 2024-05-08 16:00:08 -07:00 |
| `watsonx.py` | Revert "Add support for async streaming to watsonx provider " | 2024-05-09 07:44:15 -07:00 |
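
Each module above implements one provider backend behind litellm's unified `completion()` interface, and the model string determines which module handles the call. A minimal usage sketch, assuming `litellm` is installed and the relevant provider API key is set in the environment; the model names and routing examples are illustrative, not taken from this listing:

```python
# litellm dispatches each call to the matching provider module in this
# directory, e.g. "gpt-3.5-turbo" -> openai.py, "claude-*" -> anthropic.py,
# "ollama/..." -> ollama.py.
import litellm

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
    # stream_options support for chat completion models landed per the
    # 2024-05-08 openai.py commit above; include_usage mirrors OpenAI's API.
    stream_options={"include_usage": True},
)
for chunk in response:
    print(chunk)
```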