Merge pull request #3561 from simonsanvil/feature/watsonx-integration

(fix) Fixed linting and other bugs with watsonx provider
Krish Dholakia 2024-05-11 09:56:02 -07:00 committed by GitHub
commit d33e49411d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 310 additions and 101 deletions
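For context, this PR adds streaming and async (`acompletion`) support to the watsonx provider. Below is a minimal sketch of how the new code paths can be driven through litellm's public API, modeled on the tests added further down; the model name and parameters are taken from those tests, and watsonx credential configuration is assumed and not shown.

```python
# Illustrative sketch only - exercises the new watsonx streaming and async paths.
import asyncio
import litellm

messages = [{"role": "user", "content": "Say hello"}]

# Synchronous streaming (served by handle_stream_request in the diff below).
for chunk in litellm.completion(
    model="watsonx/ibm/granite-13b-chat-v2",
    messages=messages,
    max_tokens=20,
    stream=True,
):
    print(chunk)

# Async streaming (served by handle_stream_request_async in the diff below).
async def main():
    response = await litellm.acompletion(
        model="watsonx/ibm/granite-13b-chat-v2",
        messages=messages,
        max_tokens=20,
        stream=True,
    )
    async for chunk in response:
        print(chunk)

asyncio.run(main())
```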

View file

@@ -1,12 +1,26 @@
 from enum import Enum
 import json, types, time  # noqa: E401
-from contextlib import contextmanager
-from typing import Callable, Dict, Optional, Any, Union, List
+from contextlib import asynccontextmanager, contextmanager
+from typing import (
+    Callable,
+    Dict,
+    Generator,
+    AsyncGenerator,
+    Iterator,
+    AsyncIterator,
+    Optional,
+    Any,
+    Union,
+    List,
+    ContextManager,
+    AsyncContextManager,
+)
 import httpx  # type: ignore
 import requests  # type: ignore
 import litellm
-from litellm.utils import ModelResponse, get_secret, Usage
+from litellm.utils import ModelResponse, Usage, get_secret
+from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
 from .base import BaseLLM
 from .prompt_templates import factory as ptf
@@ -188,11 +202,12 @@ class WatsonXAIEndpoint(str, Enum):
     )
     EMBEDDINGS = "/ml/v1/text/embeddings"
     PROMPTS = "/ml/v1/prompts"
+    AVAILABLE_MODELS = "/ml/v1/foundation_model_specs"


 class IBMWatsonXAI(BaseLLM):
     """
-    Class to interface with IBM Watsonx.ai API for text generation and embeddings.
+    Class to interface with IBM watsonx.ai API for text generation and embeddings.

     Reference: https://cloud.ibm.com/apidocs/watsonx-ai
     """
@@ -343,7 +358,7 @@ class IBMWatsonXAI(BaseLLM):
             )
         if token is None and api_key is not None:
             # generate the auth token
-            if print_verbose:
+            if print_verbose is not None:
                 print_verbose("Generating IAM token for Watsonx.ai")
             token = self.generate_iam_token(api_key)
         elif token is None and api_key is None:
@@ -378,10 +393,11 @@ class IBMWatsonXAI(BaseLLM):
         print_verbose: Callable,
         encoding,
         logging_obj,
-        optional_params: dict,
-        litellm_params: Optional[dict] = None,
+        optional_params=None,
+        acompletion=None,
+        litellm_params=None,
         logger_fn=None,
-        timeout: Optional[float] = None,
+        timeout=None,
     ):
         """
         Send a text generation request to the IBM Watsonx.ai API.
@@ -402,12 +418,12 @@ class IBMWatsonXAI(BaseLLM):
             model, messages, provider, custom_prompt_dict
         )

-        def process_text_request(request_params: dict) -> ModelResponse:
-            with self._manage_response(
-                request_params, logging_obj=logging_obj, input=prompt, timeout=timeout
-            ) as resp:
-                json_resp = resp.json()
-
+        def process_text_gen_response(json_resp: dict) -> ModelResponse:
+            if "results" not in json_resp:
+                raise WatsonXAIError(
+                    status_code=500,
+                    message=f"Error: Invalid response from Watsonx.ai API: {json_resp}",
+                )
             generated_text = json_resp["results"][0]["generated_text"]
             prompt_tokens = json_resp["results"][0]["input_token_count"]
             completion_tokens = json_resp["results"][0]["generated_token_count"]
@@ -415,36 +431,70 @@ class IBMWatsonXAI(BaseLLM):
             model_response["finish_reason"] = json_resp["results"][0]["stop_reason"]
             model_response["created"] = int(time.time())
             model_response["model"] = model
-            setattr(
-                model_response,
-                "usage",
-                Usage(
-                    prompt_tokens=prompt_tokens,
-                    completion_tokens=completion_tokens,
-                    total_tokens=prompt_tokens + completion_tokens,
-                ),
+            usage = Usage(
+                prompt_tokens=prompt_tokens,
+                completion_tokens=completion_tokens,
+                total_tokens=prompt_tokens + completion_tokens,
             )
+            setattr(model_response, "usage", usage)
             return model_response

-        def process_stream_request(
-            request_params: dict,
+        def process_stream_response(
+            stream_resp: Union[Iterator[str], AsyncIterator],
         ) -> litellm.CustomStreamWrapper:
+            streamwrapper = litellm.CustomStreamWrapper(
+                stream_resp,
+                model=model,
+                custom_llm_provider="watsonx",
+                logging_obj=logging_obj,
+            )
+            return streamwrapper
+
+        # create the function to manage the request to watsonx.ai
+        self.request_manager = RequestManager(logging_obj)
+
+        def handle_text_request(request_params: dict) -> ModelResponse:
+            with self.request_manager.request(
+                request_params,
+                input=prompt,
+                timeout=timeout,
+            ) as resp:
+                json_resp = resp.json()
+
+            return process_text_gen_response(json_resp)
+
+        async def handle_text_request_async(request_params: dict) -> ModelResponse:
+            async with self.request_manager.async_request(
+                request_params,
+                input=prompt,
+                timeout=timeout,
+            ) as resp:
+                json_resp = resp.json()
+
+            return process_text_gen_response(json_resp)
+
+        def handle_stream_request(request_params: dict) -> litellm.CustomStreamWrapper:
             # stream the response - generated chunks will be handled
             # by litellm.utils.CustomStreamWrapper.handle_watsonx_stream
-            with self._manage_response(
+            with self.request_manager.request(
                 request_params,
-                logging_obj=logging_obj,
                 stream=True,
                 input=prompt,
                 timeout=timeout,
             ) as resp:
-                response = litellm.CustomStreamWrapper(
-                    resp.iter_lines(),
-                    model=model,
-                    custom_llm_provider="watsonx",
-                    logging_obj=logging_obj,
-                )
-            return response
+                streamwrapper = process_stream_response(resp.iter_lines())
+            return streamwrapper
+
+        async def handle_stream_request_async(request_params: dict) -> litellm.CustomStreamWrapper:
+            # stream the response - generated chunks will be handled
+            # by litellm.utils.CustomStreamWrapper.handle_watsonx_stream
+            async with self.request_manager.async_request(
+                request_params,
+                stream=True,
+                input=prompt,
+                timeout=timeout,
+            ) as resp:
+                streamwrapper = process_stream_response(resp.aiter_lines())
+            return streamwrapper

         try:
             ## Get the response from the model
@@ -455,10 +505,18 @@ class IBMWatsonXAI(BaseLLM):
                 optional_params=optional_params,
                 print_verbose=print_verbose,
             )
-            if stream:
-                return process_stream_request(req_params)
+            if stream and (acompletion is True):
+                # stream and async text generation
+                return handle_stream_request_async(req_params)
+            elif stream:
+                # streaming text generation
+                return handle_stream_request(req_params)
+            elif (acompletion is True):
+                # async text generation
+                return handle_text_request_async(req_params)
             else:
-                return process_text_request(req_params)
+                # regular text generation
+                return handle_text_request(req_params)
         except WatsonXAIError as e:
             raise e
         except Exception as e:
@@ -473,6 +531,7 @@ class IBMWatsonXAI(BaseLLM):
         model_response=None,
         optional_params=None,
         encoding=None,
+        aembedding=None,
     ):
         """
         Send a text embedding request to the IBM Watsonx.ai API.
@@ -507,9 +566,6 @@ class IBMWatsonXAI(BaseLLM):
         }
         request_params = dict(version=api_params["api_version"])
         url = api_params["url"].rstrip("/") + WatsonXAIEndpoint.EMBEDDINGS
-        # request = httpx.Request(
-        #     "POST", url, headers=headers, json=payload, params=request_params
-        # )
         req_params = {
             "method": "POST",
             "url": url,
@@ -517,25 +573,49 @@ class IBMWatsonXAI(BaseLLM):
             "json": payload,
             "params": request_params,
         }
-        with self._manage_response(
-            req_params, logging_obj=logging_obj, input=input
-        ) as resp:
-            json_resp = resp.json()
-
-        results = json_resp.get("results", [])
-        embedding_response = []
-        for idx, result in enumerate(results):
-            embedding_response.append(
-                {"object": "embedding", "index": idx, "embedding": result["embedding"]}
-            )
-        model_response["object"] = "list"
-        model_response["data"] = embedding_response
-        model_response["model"] = model
-        input_tokens = json_resp.get("input_token_count", 0)
-        model_response.usage = Usage(
-            prompt_tokens=input_tokens, completion_tokens=0, total_tokens=input_tokens
-        )
-        return model_response
+        request_manager = RequestManager(logging_obj)
+
+        def process_embedding_response(json_resp: dict) -> ModelResponse:
+            results = json_resp.get("results", [])
+            embedding_response = []
+            for idx, result in enumerate(results):
+                embedding_response.append(
+                    {
+                        "object": "embedding",
+                        "index": idx,
+                        "embedding": result["embedding"],
+                    }
+                )
+            model_response["object"] = "list"
+            model_response["data"] = embedding_response
+            model_response["model"] = model
+            input_tokens = json_resp.get("input_token_count", 0)
+            model_response.usage = Usage(
+                prompt_tokens=input_tokens,
+                completion_tokens=0,
+                total_tokens=input_tokens,
+            )
+            return model_response
+
+        def handle_embedding(request_params: dict) -> ModelResponse:
+            with request_manager.request(request_params, input=input) as resp:
+                json_resp = resp.json()
+            return process_embedding_response(json_resp)
+
+        async def handle_aembedding(request_params: dict) -> ModelResponse:
+            async with request_manager.async_request(request_params, input=input) as resp:
+                json_resp = resp.json()
+            return process_embedding_response(json_resp)
+
+        try:
+            if aembedding is True:
+                return handle_embedding(req_params)
+            else:
+                return handle_aembedding(req_params)
+        except WatsonXAIError as e:
+            raise e
+        except Exception as e:
+            raise WatsonXAIError(status_code=500, message=str(e))

     def generate_iam_token(self, api_key=None, **params):
         headers = {}
@@ -558,52 +638,144 @@ class IBMWatsonXAI(BaseLLM):
         self.token = iam_access_token
         return iam_access_token

-    @contextmanager
-    def _manage_response(
-        self,
-        request_params: dict,
-        logging_obj: Any,
-        stream: bool = False,
-        input: Optional[Any] = None,
-        timeout: Optional[float] = None,
-    ):
-        request_str = (
-            f"response = {request_params['method']}(\n"
-            f"\turl={request_params['url']},\n"
-            f"\tjson={request_params['json']},\n"
-            f")"
-        )
-        logging_obj.pre_call(
-            input=input,
-            api_key=request_params["headers"].get("Authorization"),
-            additional_args={
-                "complete_input_dict": request_params["json"],
-                "request_str": request_str,
-            },
-        )
-        if timeout:
-            request_params["timeout"] = timeout
-        try:
-            if stream:
-                resp = requests.request(
-                    **request_params,
-                    stream=True,
-                )
-                resp.raise_for_status()
-                yield resp
-            else:
-                resp = requests.request(**request_params)
-                resp.raise_for_status()
-                yield resp
-        except Exception as e:
-            raise WatsonXAIError(status_code=500, message=str(e))
-        if not stream:
-            logging_obj.post_call(
-                input=input,
-                api_key=request_params["headers"].get("Authorization"),
-                original_response=json.dumps(resp.json()),
-                additional_args={
-                    "status_code": resp.status_code,
-                    "complete_input_dict": request_params["json"],
-                },
-            )
+    def get_available_models(self, *, ids_only: bool = True, **params):
+        api_params = self._get_api_params(params)
+        headers = {
+            "Authorization": f"Bearer {api_params['token']}",
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+        }
+        request_params = dict(version=api_params["api_version"])
+        url = api_params["url"].rstrip("/") + WatsonXAIEndpoint.AVAILABLE_MODELS
+        req_params = dict(method="GET", url=url, headers=headers, params=request_params)
+        with RequestManager(logging_obj=None).request(req_params) as resp:
+            json_resp = resp.json()
+        if not ids_only:
+            return json_resp
+        return [res["model_id"] for res in json_resp["resources"]]
+
+
+class RequestManager:
+    """
+    Returns a context manager that manages the response from the request.
+    if async_ is True, returns an async context manager, otherwise returns a regular context manager.
+
+    Usage:
+    ```python
+    request_params = dict(method="POST", url="https://api.example.com", headers={"Authorization" : "Bearer token"}, json={"key": "value"})
+    request_manager = RequestManager(logging_obj=logging_obj)
+    async with request_manager.request(request_params) as resp:
+        ...
+    # or
+    with request_manager.async_request(request_params) as resp:
+        ...
+    ```
+    """
+
+    def __init__(self, logging_obj=None):
+        self.logging_obj = logging_obj
+
+    def pre_call(
+        self,
+        request_params: dict,
+        input: Optional[Any] = None,
+    ):
+        if self.logging_obj is None:
+            return
+        request_str = (
+            f"response = {request_params['method']}(\n"
+            f"\turl={request_params['url']},\n"
+            f"\tjson={request_params.get('json')},\n"
+            f")"
+        )
+        self.logging_obj.pre_call(
+            input=input,
+            api_key=request_params["headers"].get("Authorization"),
+            additional_args={
+                "complete_input_dict": request_params.get("json"),
+                "request_str": request_str,
+            },
+        )
+
+    def post_call(self, resp, request_params):
+        if self.logging_obj is None:
+            return
+        self.logging_obj.post_call(
+            input=input,
+            api_key=request_params["headers"].get("Authorization"),
+            original_response=json.dumps(resp.json()),
+            additional_args={
+                "status_code": resp.status_code,
+                "complete_input_dict": request_params.get(
+                    "data", request_params.get("json")
+                ),
+            },
+        )
+
+    @contextmanager
+    def request(
+        self,
+        request_params: dict,
+        stream: bool = False,
+        input: Optional[Any] = None,
+        timeout=None,
+    ) -> Generator[requests.Response, None, None]:
+        """
+        Returns a context manager that yields the response from the request.
+        """
+        self.pre_call(request_params, input)
+        if timeout:
+            request_params["timeout"] = timeout
+        if stream:
+            request_params["stream"] = stream
+        try:
+            resp = requests.request(**request_params)
+            if not resp.ok:
+                raise WatsonXAIError(
+                    status_code=resp.status_code,
+                    message=f"Error {resp.status_code} ({resp.reason}): {resp.text}",
+                )
+            yield resp
+        except Exception as e:
+            raise WatsonXAIError(status_code=500, message=str(e))
+        if not stream:
+            self.post_call(resp, request_params)
+
+    @asynccontextmanager
+    async def async_request(
+        self,
+        request_params: dict,
+        stream: bool = False,
+        input: Optional[Any] = None,
+        timeout=None,
+    ) -> AsyncGenerator[httpx.Response, None]:
+        self.pre_call(request_params, input)
+        if timeout:
+            request_params["timeout"] = timeout
+        if stream:
+            request_params["stream"] = stream
+        try:
+            # async with AsyncHTTPHandler(timeout=timeout) as client:
+            self.async_handler = AsyncHTTPHandler(
+                timeout=httpx.Timeout(
+                    timeout=request_params.pop("timeout", 600.0), connect=5.0
+                ),
+            )
+            # async_handler.client.verify = False
+            if "json" in request_params:
+                request_params["data"] = json.dumps(request_params.pop("json", {}))
+            method = request_params.pop("method")
+            if method.upper() == "POST":
+                resp = await self.async_handler.post(**request_params)
+            else:
+                resp = await self.async_handler.get(**request_params)
+            if resp.status_code not in [200, 201]:
+                raise WatsonXAIError(
+                    status_code=resp.status_code,
+                    message=f"Error {resp.status_code} ({resp.reason}): {resp.text}",
+                )
+            yield resp
+            # await async_handler.close()
+        except Exception as e:
+            raise WatsonXAIError(status_code=500, message=str(e))
+        if not stream:
+            self.post_call(resp, request_params)
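For reference, here is a minimal usage sketch of the `RequestManager` introduced above. The pairing shown follows the decorators in the diff: `request()` (decorated with `@contextmanager`) is used with a plain `with` block, and `async_request()` (decorated with `@asynccontextmanager`) with `async with`. The URL, token, and payload values are placeholders, and `logging_obj=None` is used so the logging hooks are skipped, as `get_available_models` does above.

```python
# Sketch only - placeholder values, not a working watsonx call.
req_params = {
    "method": "POST",
    "url": "https://example.com/ml/v1/text/generation",  # placeholder endpoint
    "headers": {"Authorization": "Bearer <token>"},       # placeholder token
    "json": {"input": "Hello"},
    "params": {"version": "<api-version>"},               # placeholder version
}

request_manager = RequestManager(logging_obj=None)

# Synchronous path (requests-based), as used by handle_text_request / handle_stream_request.
with request_manager.request(req_params, input="Hello", timeout=30) as resp:
    print(resp.json())

# Asynchronous path (AsyncHTTPHandler-based), as used by the *_async handlers.
# async_request pops "method"/"json" from the dict it receives, so pass a copy.
async def call_async():
    async with request_manager.async_request(dict(req_params), input="Hello", timeout=30) as resp:
        print(resp.json())
```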

View file

@@ -3236,6 +3236,24 @@ def test_completion_watsonx():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")


+def test_completion_stream_watsonx():
+    litellm.set_verbose = True
+    model_name = "watsonx/ibm/granite-13b-chat-v2"
+    try:
+        response = completion(
+            model=model_name,
+            messages=messages,
+            stop=["stop"],
+            max_tokens=20,
+            stream=True
+        )
+        for chunk in response:
+            print(chunk)
+    except litellm.APIError as e:
+        pass
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+
 @pytest.mark.parametrize(
     "provider, model, project, region_name, token",
@@ -3300,6 +3318,25 @@ async def test_acompletion_watsonx():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")


+@pytest.mark.asyncio
+async def test_acompletion_stream_watsonx():
+    litellm.set_verbose = True
+    model_name = "watsonx/ibm/granite-13b-chat-v2"
+    print("testing watsonx")
+    try:
+        response = await litellm.acompletion(
+            model=model_name,
+            messages=messages,
+            temperature=0.2,
+            max_tokens=80,
+            stream=True
+        )
+        # Add any assertions here to check the response
+        async for chunk in response:
+            print(chunk)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+
 # test_completion_palm_stream()

View file

@@ -10430,7 +10430,7 @@ class CustomStreamWrapper:
                     response = chunk.replace("data: ", "").strip()
                     parsed_response = json.loads(response)
                 else:
-                    return {"text": "", "is_finished": False}
+                    return {"text": "", "is_finished": False, "prompt_tokens": 0, "completion_tokens": 0}
             else:
                 print_verbose(f"chunk: {chunk} (Type: {type(chunk)})")
                 raise ValueError(
@@ -10445,8 +10445,8 @@ class CustomStreamWrapper:
                 "text": text,
                 "is_finished": is_finished,
                 "finish_reason": finish_reason,
-                "prompt_tokens": results[0].get("input_token_count", None),
-                "completion_tokens": results[0].get("generated_token_count", None),
+                "prompt_tokens": results[0].get("input_token_count", 0),
+                "completion_tokens": results[0].get("generated_token_count", 0),
             }
             return {"text": "", "is_finished": False}
         except Exception as e:
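The last two hunks make `handle_watsonx_stream` report token counts as integers that default to 0 instead of None. A small plain-Python illustration (not litellm internals) of why that matters when per-chunk usage is aggregated downstream:

```python
# Sketch: aggregating usage across parsed stream chunks. With None defaults the
# arithmetic would raise TypeError; with 0 defaults, empty or keep-alive chunks
# contribute nothing and the totals stay correct.
parsed_chunks = [
    {"text": "Hello", "is_finished": False, "prompt_tokens": 12, "completion_tokens": 1},
    {"text": "", "is_finished": False, "prompt_tokens": 0, "completion_tokens": 0},
    {"text": " world", "is_finished": True, "prompt_tokens": 0, "completion_tokens": 1},
]
completion_tokens = sum(c["completion_tokens"] for c in parsed_chunks)  # 2
prompt_tokens = max(c["prompt_tokens"] for c in parsed_chunks)          # 12
print(prompt_tokens, completion_tokens)
```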