(feat) add ollama_chat v0

ishaan-jaff 2023-12-25 14:27:10 +05:30
parent 4905929de3
commit da4ec6c8b6
2 changed files with 450 additions and 0 deletions

--- a/litellm/main.py
+++ b/litellm/main.py

@@ -49,6 +49,7 @@ from .llms import (
     baseten,
     vllm,
     ollama,
+    ollama_chat,
     cohere,
     petals,
     oobabooga,
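
Aside, not part of the diff: most of this commit's 450 added lines are the new litellm/llms/ollama_chat.py module itself, which is not shown here. Inferred purely from the call site in the next hunk, the module exposes a get_ollama_response() helper that presumably posts to Ollama's /api/chat endpoint. A minimal sketch of that shape, as an illustration rather than the actual module:

import requests

# Sketch only: the signature mirrors the call in the hunk below; the
# real module also handles streaming, async completion, and logging.
def get_ollama_response(
    api_base="http://localhost:11434",
    model="llama2",
    prompt="Why is the sky blue?",
    optional_params=None,
    logging_obj=None,
    acompletion=False,
    model_response=None,
    encoding=None,
):
    optional_params = optional_params or {}
    # Assumption: the prompt string built by prompt_factory() is sent
    # as a single user message to Ollama's chat endpoint.
    data = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "stream": optional_params.get("stream", False),
    }
    resp = requests.post(f"{api_base}/api/chat", json=data)
    resp.raise_for_status()
    return resp.json()
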
@@ -1529,6 +1530,52 @@ def completion(
         if acompletion is True or optional_params.get("stream", False) == True:
             return generator
         response = generator
+    elif custom_llm_provider == "ollama-chat":
+        api_base = (
+            litellm.api_base
+            or api_base
+            or get_secret("OLLAMA_API_BASE")
+            or "http://localhost:11434"
+        )
+        custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
+        if model in custom_prompt_dict:
+            # check if the model has a registered custom prompt
+            model_prompt_details = custom_prompt_dict[model]
+            prompt = custom_prompt(
+                role_dict=model_prompt_details["roles"],
+                initial_prompt_value=model_prompt_details["initial_prompt_value"],
+                final_prompt_value=model_prompt_details["final_prompt_value"],
+                messages=messages,
+            )
+        else:
+            prompt = prompt_factory(
+                model=model,
+                messages=messages,
+                custom_llm_provider=custom_llm_provider,
+            )
+        if isinstance(prompt, dict):
+            # for multimodal models - ollama/llava prompt_factory returns a dict {
+            #     "prompt": prompt,
+            #     "images": images
+            # }
+            prompt, images = prompt["prompt"], prompt["images"]
+            optional_params["images"] = images
+        ## LOGGING
+        generator = ollama_chat.get_ollama_response(
+            api_base,
+            model,
+            prompt,
+            optional_params,
+            logging_obj=logging,
+            acompletion=acompletion,
+            model_response=model_response,
+            encoding=encoding,
+        )
+        if acompletion is True or optional_params.get("stream", False) == True:
+            return generator
+        response = generator
     elif (
         custom_llm_provider == "baseten"
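
With the branch above in place, a call against a locally running Ollama server might look like the following. This is a sketch, not part of the commit: it assumes `ollama serve` is running with the model already pulled, and passes custom_llm_provider explicitly so routing matches the "ollama-chat" string checked above.

import litellm

response = litellm.completion(
    model="llama2",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    custom_llm_provider="ollama-chat",  # routes into the new branch
    api_base="http://localhost:11434",  # optional; falls back to OLLAMA_API_BASE, then the default
)
print(response)

Because the branch returns the generator directly when streaming is requested, passing stream=True would yield response chunks instead of a single completed response object.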