mirror of https://github.com/BerriAI/litellm.git
fix(openai-py): fix linting issues
This commit is contained in:
parent 208a6d365b
commit 68461f5863

2 changed files with 7 additions and 7 deletions
@@ -232,12 +232,12 @@ class OpenAIChatCompletion(BaseLLM):
             CustomOpenAIError(status_code=500, message="Invalid response object.")

     def completion(self,
-                model: str=None,
-                messages: list=None,
-                model_response: ModelResponse=None,
-                print_verbose: Callable=None,
+                model: Optional[str]=None,
+                messages: Optional[list]=None,
+                model_response: Optional[ModelResponse]=None,
+                print_verbose: Optional[Callable]=None,
                 api_key: Optional[str]=None,
-                api_base: str=None,
+                api_base: Optional[str]=None,
                 logging_obj=None,
                 optional_params=None,
                 litellm_params=None,
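Context for the hunk above: under PEP 484, a parameter annotated with a plain type such as `str` should not silently default to `None`; with implicit Optional disabled (the modern default for type checkers like mypy), that pattern is reported as an incompatible default, which is the kind of lint error this commit fixes. A minimal standalone sketch of the before/after pattern (not litellm code; the function names are illustrative):

    # Why the annotations above needed Optional[...]
    from typing import Callable, Optional

    def completion_before(model: str = None):          # flagged by type checkers: default None vs. type "str"
        return model

    def completion_after(model: Optional[str] = None,  # explicit Optional, no lint error
                         print_verbose: Optional[Callable] = None):
        if print_verbose is not None:
            print_verbose(f"completing with model={model}")
        return model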
@@ -10,7 +10,7 @@
 # add_function_to_prompt = True # e.g: Ollama doesn't support functions, so add it to the prompt instead
 # drop_params = True # drop any params not supported by the provider (e.g. Ollama)

-[local_model] # run via `litellm --model local_model`
+[local_model] # run via `litellm --model local`
 # model_name = "ollama/codellama" # Uncomment to set a local model
 # max_tokens = "" # set max tokens for the model
 # temperature = "" # set temperature for the model
@@ -29,7 +29,7 @@
 # MODEL_PRE_PROMPT = "You are a good bot" # Applied at the start of the prompt
 # MODEL_POST_PROMPT = "Now answer as best as you can" # Applied at the end of the prompt

-[hosted_model] # run via `litellm --model hosted_model`
+[hosted_model] # run via `litellm --model hosted`
 # model_name = "gpt-4"
 # max_tokens = "" # set max tokens for the model
 # temperature = "" # set temperature for the model
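The two config hunks only shorten the example commands in the template's comments (`--model local_model` and `--model hosted_model` become `--model local` and `--model hosted`). For illustration only, using nothing beyond the keys and values already shown in the template, an enabled local section would look roughly like:

    [local_model]  # run via `litellm --model local`
    model_name = "ollama/codellama"
    # max_tokens = ""     # optionally set max tokens for the model
    # temperature = ""    # optionally set temperature for the model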