fix: fix linting errors

commit 389a51e05d (parent 6e9f048618)
Author: Krrish Dholakia
Date:   2024-07-11 13:36:55 -07:00

4 changed files with 97 additions and 134 deletions

.pre-commit-config.yaml

@@ -1,12 +1,12 @@
 repos:
   - repo: local
     hooks:
-      # - id: mypy
-      #   name: mypy
-      #   entry: python3 -m mypy --ignore-missing-imports
-      #   language: system
-      #   types: [python]
-      #   files: ^litellm/
+      - id: mypy
+        name: mypy
+        entry: python3 -m mypy --ignore-missing-imports
+        language: system
+        types: [python]
+        files: ^litellm/
       - id: isort
         name: isort
         entry: isort
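Note: with the mypy hook uncommented, a plain `git commit` (or, assuming pre-commit is installed, `pre-commit run --all-files`) now runs `python3 -m mypy --ignore-missing-imports` over files matching `^litellm/` in addition to isort; that check is presumably what surfaced the lint errors fixed in the files below.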

litellm/__init__.py

@@ -789,6 +789,7 @@ from .utils import (
     get_api_base,
     get_first_chars_messages,
     ModelResponse,
+    EmbeddingResponse,
     ImageResponse,
     get_provider_fields,
 )
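Note: adding `EmbeddingResponse` to this re-export list makes it importable from the package root (`from litellm import EmbeddingResponse`), alongside the existing `ModelResponse` and `ImageResponse` exports.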

litellm/llms/together_ai.py

@@ -3,92 +3,96 @@
 Deprecated. We now do together ai calls via the openai client.
 Reference: https://docs.together.ai/docs/openai-api-compatibility
 """
-# import os, types
-# import json
-# from enum import Enum
-# import requests  # type: ignore
-# import time
-# from typing import Callable, Optional
-# import litellm
-# import httpx  # type: ignore
-# from litellm.utils import ModelResponse, Usage
-# from .prompt_templates.factory import prompt_factory, custom_prompt
+import json
+import os
+import time
+import types
+from enum import Enum
+from typing import Callable, Optional
+import httpx  # type: ignore
+import requests  # type: ignore
+import litellm
+from litellm.utils import ModelResponse, Usage
+from .prompt_templates.factory import custom_prompt, prompt_factory
 
-# class TogetherAIError(Exception):
-#     def __init__(self, status_code, message):
-#         self.status_code = status_code
-#         self.message = message
-#         self.request = httpx.Request(
-#             method="POST", url="https://api.together.xyz/inference"
-#         )
-#         self.response = httpx.Response(status_code=status_code, request=self.request)
-#         super().__init__(
-#             self.message
-#         )  # Call the base class constructor with the parameters it needs
+class TogetherAIError(Exception):
+    def __init__(self, status_code, message):
+        self.status_code = status_code
+        self.message = message
+        self.request = httpx.Request(
+            method="POST", url="https://api.together.xyz/inference"
+        )
+        self.response = httpx.Response(status_code=status_code, request=self.request)
+        super().__init__(
+            self.message
+        )  # Call the base class constructor with the parameters it needs
 
-# class TogetherAIConfig:
-#     """
-#     Reference: https://docs.together.ai/reference/inference
-#     The class `TogetherAIConfig` provides configuration for the TogetherAI's API interface. Here are the parameters:
-#     - `max_tokens` (int32, required): The maximum number of tokens to generate.
-#     - `stop` (string, optional): A string sequence that will truncate (stop) the inference text output. For example, "\n\n" will stop generation as soon as the model generates two newlines.
-#     - `temperature` (float, optional): A decimal number that determines the degree of randomness in the response. A value of 1 will always yield the same output. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value greater than 1 introduces more randomness in the output.
-#     - `top_p` (float, optional): The `top_p` (nucleus) parameter is used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold, below which all less likely tokens are filtered out. This technique helps to maintain diversity and generate more fluent and natural-sounding text.
-#     - `top_k` (int32, optional): The `top_k` parameter is used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.
-#     - `repetition_penalty` (float, optional): A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition.
-#     - `logprobs` (int32, optional): This parameter is not described in the prompt.
-#     """
-#     max_tokens: Optional[int] = None
-#     stop: Optional[str] = None
-#     temperature: Optional[int] = None
-#     top_p: Optional[float] = None
-#     top_k: Optional[int] = None
-#     repetition_penalty: Optional[float] = None
-#     logprobs: Optional[int] = None
-#     def __init__(
-#         self,
-#         max_tokens: Optional[int] = None,
-#         stop: Optional[str] = None,
-#         temperature: Optional[int] = None,
-#         top_p: Optional[float] = None,
-#         top_k: Optional[int] = None,
-#         repetition_penalty: Optional[float] = None,
-#         logprobs: Optional[int] = None,
-#     ) -> None:
-#         locals_ = locals()
-#         for key, value in locals_.items():
-#             if key != "self" and value is not None:
-#                 setattr(self.__class__, key, value)
-#     @classmethod
-#     def get_config(cls):
-#         return {
-#             k: v
-#             for k, v in cls.__dict__.items()
-#             if not k.startswith("__")
-#             and not isinstance(
-#                 v,
-#                 (
-#                     types.FunctionType,
-#                     types.BuiltinFunctionType,
-#                     classmethod,
-#                     staticmethod,
-#                 ),
-#             )
-#             and v is not None
-#         }
+class TogetherAIConfig:
+    """
+    Reference: https://docs.together.ai/reference/inference
+    The class `TogetherAIConfig` provides configuration for the TogetherAI's API interface. Here are the parameters:
+    - `max_tokens` (int32, required): The maximum number of tokens to generate.
+    - `stop` (string, optional): A string sequence that will truncate (stop) the inference text output. For example, "\n\n" will stop generation as soon as the model generates two newlines.
+    - `temperature` (float, optional): A decimal number that determines the degree of randomness in the response. A value of 1 will always yield the same output. A temperature less than 1 favors more correctness and is appropriate for question answering or summarization. A value greater than 1 introduces more randomness in the output.
+    - `top_p` (float, optional): The `top_p` (nucleus) parameter is used to dynamically adjust the number of choices for each predicted token based on the cumulative probabilities. It specifies a probability threshold, below which all less likely tokens are filtered out. This technique helps to maintain diversity and generate more fluent and natural-sounding text.
+    - `top_k` (int32, optional): The `top_k` parameter is used to limit the number of choices for the next predicted word or token. It specifies the maximum number of tokens to consider at each step, based on their probability of occurrence. This technique helps to speed up the generation process and can improve the quality of the generated text by focusing on the most likely options.
+    - `repetition_penalty` (float, optional): A number that controls the diversity of generated text by reducing the likelihood of repeated sequences. Higher values decrease repetition.
+    - `logprobs` (int32, optional): This parameter is not described in the prompt.
+    """
+    max_tokens: Optional[int] = None
+    stop: Optional[str] = None
+    temperature: Optional[int] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    repetition_penalty: Optional[float] = None
+    logprobs: Optional[int] = None
+    def __init__(
+        self,
+        max_tokens: Optional[int] = None,
+        stop: Optional[str] = None,
+        temperature: Optional[int] = None,
+        top_p: Optional[float] = None,
+        top_k: Optional[int] = None,
+        repetition_penalty: Optional[float] = None,
+        logprobs: Optional[int] = None,
+    ) -> None:
+        locals_ = locals()
+        for key, value in locals_.items():
+            if key != "self" and value is not None:
+                setattr(self.__class__, key, value)
+    @classmethod
+    def get_config(cls):
+        return {
+            k: v
+            for k, v in cls.__dict__.items()
+            if not k.startswith("__")
+            and not isinstance(
+                v,
+                (
+                    types.FunctionType,
+                    types.BuiltinFunctionType,
+                    classmethod,
+                    staticmethod,
+                ),
+            )
+            and v is not None
+        }
 
 # def validate_environment(api_key):
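For context on the "Deprecated. We now do together ai calls via the openai client" note: requests no longer go through the raw /inference endpoint. A minimal sketch of the OpenAI-compatible calling path (the model id is illustrative; the env var matches one read by the main.py branch removed below):

import os

import litellm

# Together AI is addressed via a provider prefix; litellm routes the request
# through its OpenAI-compatible client rather than the old /inference endpoint.
response = litellm.completion(
    model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1",  # illustrative model id
    messages=[{"role": "user", "content": "hello"}],
    api_key=os.environ.get("TOGETHERAI_API_KEY"),  # env var also checked by the removed branch
)
print(response.choices[0].message.content)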

litellm/main.py

@@ -1934,51 +1934,7 @@ def completion(
             """
             Deprecated. We now do together ai calls via the openai client - https://docs.together.ai/docs/openai-api-compatibility
             """
-            custom_llm_provider = "together_ai"
-            together_ai_key = (
-                api_key
-                or litellm.togetherai_api_key
-                or get_secret("TOGETHER_AI_TOKEN")
-                or get_secret("TOGETHERAI_API_KEY")
-                or litellm.api_key
-            )
-            api_base = (
-                api_base
-                or litellm.api_base
-                or get_secret("TOGETHERAI_API_BASE")
-                or "https://api.together.xyz/inference"
-            )
-            custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
-            model_response = together_ai.completion(
-                model=model,
-                messages=messages,
-                api_base=api_base,
-                model_response=model_response,
-                print_verbose=print_verbose,
-                optional_params=optional_params,
-                litellm_params=litellm_params,
-                logger_fn=logger_fn,
-                encoding=encoding,
-                api_key=together_ai_key,
-                logging_obj=logging,
-                custom_prompt_dict=custom_prompt_dict,
-            )
-            if (
-                "stream_tokens" in optional_params
-                and optional_params["stream_tokens"] == True
-            ):
-                # don't try to access stream object,
-                response = CustomStreamWrapper(
-                    model_response,
-                    model,
-                    custom_llm_provider="together_ai",
-                    logging_obj=logging,
-                )
-                return response
-            response = model_response
+            pass
         elif custom_llm_provider == "palm":
             palm_api_key = api_key or get_secret("PALM_API_KEY") or litellm.api_key
@@ -2461,10 +2417,10 @@ def completion(
             ## LOGGING
             generator = ollama.get_ollama_response(
-                api_base,
-                model,
-                prompt,
-                optional_params,
+                api_base=api_base,
+                model=model,
+                prompt=prompt,
+                optional_params=optional_params,
                 logging_obj=logging,
                 acompletion=acompletion,
                 model_response=model_response,
@@ -2490,11 +2446,11 @@ def completion(
             )
             ## LOGGING
             generator = ollama_chat.get_ollama_response(
-                api_base,
-                api_key,
-                model,
-                messages,
-                optional_params,
+                api_base=api_base,
+                api_key=api_key,
+                model=model,
+                messages=messages,
+                optional_params=optional_params,
                 logging_obj=logging,
                 acompletion=acompletion,
                 model_response=model_response,
@@ -3465,7 +3421,7 @@ def embedding(
                 or api_base
                 or get_secret("OLLAMA_API_BASE")
                 or "http://localhost:11434"
-            )
+            )  # type: ignore
             if isinstance(input, str):
                 input = [input]
             if not all(isinstance(item, str) for item in input):
@@ -3475,9 +3431,11 @@ def embedding(
                     llm_provider="ollama",  # type: ignore
                 )
             ollama_embeddings_fn = (
-                ollama.ollama_aembeddings if aembedding else ollama.ollama_embeddings
+                ollama.ollama_aembeddings
+                if aembedding is True
+                else ollama.ollama_embeddings
             )
-            response = ollama_embeddings_fn(
+            response = ollama_embeddings_fn(  # type: ignore
                 api_base=api_base,
                 model=model,
                 prompts=input,