mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-24 18:24:20 +00:00
Expanded .env, added Poetry and basic Docstring
This commit is contained in:
parent
5f5ad99607
commit
05ca788fb3
6 changed files with 1369 additions and 2 deletions
17
.env.example
17
.env.example
|
@@ -1,4 +1,19 @@
|
|||
# OpenAI
|
||||
OPENAI_API_KEY = ""
|
||||
# Cohere
|
||||
COHERE_API_KEY = ""
|
||||
# OpenRouter
|
||||
OR_SITE_URL = ""
|
||||
OR_APP_NAME = "LiteLLM Example app"
|
||||
OR_APP_NAME = "LiteLLM Example app"
|
||||
OR_API_KEY = ""
|
||||
# Azure API base URL
|
||||
AZURE_API_BASE = ""
|
||||
# Azure API version
|
||||
AZURE_API_VERSION = ""
|
||||
# Azure API key
|
||||
AZURE_API_KEY = ""
|
||||
# Replicate
|
||||
REPLICATE_API_KEY = ""
|
||||
REPLICATE_API_TOKEN = ""
|
||||
# Anthropic
|
||||
ANTHROPIC_API_KEY = ""
|
1
.gitignore
vendored
1
.gitignore
vendored
|
@@ -1 +1,2 @@
|
|||
.venv
|
||||
.env
|
|
@@ -65,6 +65,31 @@ def completion(
|
|||
# Optional liteLLM function params
|
||||
*, force_timeout=60, azure=False, logger_fn=None, verbose=False
|
||||
):
|
||||
# Docstring
|
||||
'''
|
||||
Parameters:
|
||||
Required:
|
||||
model (str): The model name to use for completion.
|
||||
messages (list): A list of messages to feed into the completion engine.
|
||||
Optional:
|
||||
functions (list): A list of functions to call.
|
||||
function_call (str): A string that calls the functions passed in the functions parameter.
|
||||
temperature (float): What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or top_p but not both.
|
||||
top_p (float): An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
|
||||
n (int): How many completions to generate for each prompt.
|
||||
stream (bool): Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as available, with the stream terminated by a data: [DONE] message. Otherwise, tokens will be returned as a standard JSON response.
|
||||
stop (list): One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
|
||||
max_tokens (int): How many tokens to complete to. Can return fewer if a stop sequence is hit. In text-generation tasks, the API may return fewer than the max length.
|
||||
presence_penalty (float): What penalty to apply if a token is already present at all. Bigger values mean the model will be less likely to repeat itself.
|
||||
frequency_penalty (float): What penalty to apply if a token is already present in the text so far. Bigger values mean the model will be less likely to repeat itself.
|
||||
logit_bias (dict): Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this parameter to bias the completion.
|
||||
user (str): A unique identifier representing your end-user.
|
||||
|
||||
Returns:
|
||||
response (dict): A dictionary containing the completion response.
|
||||
|
||||
Most parameters are taken from OpenAI API Reference: https://platform.openai.com/docs/api-reference/chat/create
|
||||
'''
|
||||
try:
|
||||
# check if user passed in any of the OpenAI optional params
|
||||
optional_params = get_optional_params(
|
||||
|
|
1306
poetry.lock
generated
Normal file
1306
poetry.lock
generated
Normal file
File diff suppressed because it is too large
Load diff
21
pyproject.toml
Normal file
21
pyproject.toml
Normal file
|
@@ -0,0 +1,21 @@
|
|||
[tool.poetry]
|
||||
name = "litellm"
|
||||
version = "0.1.212"
|
||||
description = "Library to easily interface with LLM API providers"
|
||||
authors = ["BerriAI"]
|
||||
license = "MIT License"
|
||||
readme = "README.md"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = "^3.9"
|
||||
openai = {extras = ["datalib"], version = "^0.27.8"}
|
||||
cohere = "^4.18.0"
|
||||
pytest = "^7.4.0"
|
||||
anthropic = "^0.3.7"
|
||||
replicate = "^0.10.0"
|
||||
python-dotenv = "^1.0.0"
|
||||
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
|
@@ -3,6 +3,5 @@ cohere
|
|||
anthropic
|
||||
replicate
|
||||
pytest
|
||||
pytest
|
||||
python-dotenv
|
||||
openai[datalib]
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue