(feat) add logprobs, top_logprobs to litellm.completion
parent ae074814c9
commit 0b0d22d58c
1 changed file with 4 additions and 0 deletions
@@ -312,6 +312,8 @@ def completion(
     seed: Optional[int] = None,
     tools: Optional[List] = None,
     tool_choice: Optional[str] = None,
+    logprobs: Optional[bool] = None,
+    top_logprobs: Optional[int] = None,
     deployment_id=None,
     # set api_base, api_version, api_key
     base_url: Optional[str] = None,
@@ -340,6 +342,8 @@ def completion(
         frequency_penalty: It is used to penalize new tokens based on their frequency in the text so far.
         logit_bias (dict, optional): Used to modify the probability of specific tokens appearing in the completion.
         user (str, optional): A unique identifier representing your end-user. This can help the LLM provider to monitor and detect abuse.
+        logprobs (bool, optional): Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message
+        top_logprobs (int, optional): An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
         metadata (dict, optional): Pass in additional metadata to tag your completion calls - eg. prompt version, details, etc.
         api_base (str, optional): Base URL for the API (default is None).
         api_version (str, optional): API version (default is None).
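Assuming the new parameters are forwarded like the existing OpenAI-style options, a call might look like the sketch below. The model name, message, and the attribute path used to read the result back are placeholders for illustration; they are not part of this commit.

    import litellm

    # Minimal usage sketch: request token log probabilities from an
    # OpenAI-style model. Model and message are placeholder values.
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "What is the capital of France?"}],
        logprobs=True,     # return log probabilities of the output tokens
        top_logprobs=2,    # 0-5 most likely tokens per position; requires logprobs=True
    )

    # For OpenAI-compatible responses the per-token log probabilities are
    # expected on the first choice; this attribute path is an assumption,
    # it is not shown in the diff above.
    print(response.choices[0].logprobs)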