Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
Litellm dev 12 30 2024 p2 (#7495)
* test(azure_openai_o1.py): initial commit with testing for the azure openai o1-preview model
* fix(base_llm_unit_tests.py): skip the azure o1-preview response-format tests, as o1 on azure doesn't support tool calling yet
* fix: initial commit of the azure o1 handler using the openai caller; simplifies calling and lets the fake-streaming logic already implemented for openai just work
* feat(azure/o1_handler.py): fake o1 streaming for azure o1 models, since azure does not currently support streaming for o1 (see the sketch below)
* feat(o1_transformation.py): support overriding 'should_fake_stream' on azure/o1 via the 'supports_native_streaming' param on model info; lets users toggle it on once azure allows o1 streaming, without waiting for a version bump
* style(router.py): remove the 'give feedback/get help' messaging when the router is used; prevents noisy messaging. Closes https://github.com/BerriAI/litellm/issues/5942
* fix(types/utils.py): handle None logprobs. Fixes https://github.com/BerriAI/litellm/issues/328
* fix(exception_mapping_utils.py): fix the 'error str' unbound-variable error
* refactor(azure_ai/): move to the openai_like chat completion handler; allows easy swapping of API base URLs (e.g. ai.services.com); see the usage sketch at the end. Fixes https://github.com/BerriAI/litellm/issues/7275
* refactor(azure_ai/): move to the base llm http handler
* fix(azure_ai/): handle differing api endpoints
* fix(azure_ai/): make sure all unit tests are passing
* fix: fix linting errors
* fix: fix linting errors
* fix: fix linting error
* fix: fix linting errors
* fix(azure_ai/transformation.py): handle the extra body param
* fix(azure_ai/transformation.py): fix max-retries param handling
* fix: fix test
* test(test_azure_o1.py): fix test
* fix(llm_http_handler.py): support handling the azure ai unprocessable-entity error
* fix(llm_http_handler.py): handle the sync invalid-param error for azure ai
* fix(azure_ai/): streaming support with base_llm_http_handler
* fix(llm_http_handler.py): working sync stream calls with unprocessable-entity handling for azure ai
* fix: fix linting errors
* fix(llm_http_handler.py): fix linting error
* fix(azure_ai/): handle the cohere tool-call invalid-index param error
This commit is contained in:
parent 0f1b298fe0
commit 0120176541
42 changed files with 638 additions and 192 deletions
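
For context on the fake-streaming bullet above: since azure does not stream o1 responses yet, the handler makes a regular completion call and replays the full response as chunks. Below is a minimal, hypothetical sketch of that technique, plus a `should_fake_stream` toggle driven by a `supports_native_streaming` flag; the function names, chunk size, and dict-based model info are illustrative assumptions, not litellm's actual internals.

```python
from typing import Dict, Iterator


def should_fake_stream(model_info: Dict[str, bool]) -> bool:
    # Illustrative assumption: if the provider later ships native
    # streaming, flipping supports_native_streaming to True turns
    # the fake path off without a library version bump.
    return not model_info.get("supports_native_streaming", False)


def fake_stream(full_text: str, chunk_size: int = 20) -> Iterator[str]:
    # Replay an already-complete response as fixed-size chunks so
    # the caller can iterate over it exactly like a real stream.
    for i in range(0, len(full_text), chunk_size):
        yield full_text[i : i + chunk_size]


if should_fake_stream({"supports_native_streaming": False}):
    for chunk in fake_stream("The complete o1 answer, fetched in one request."):
        print(chunk, end="", flush=True)
```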
```diff
@@ -724,12 +724,14 @@ class Huggingface(BaseLLM):
                 token_logprob = token["logprob"]

                 # Add the token information to the 'token_info' list
-                _logprob.tokens.append(token_text)
-                _logprob.token_logprobs.append(token_logprob)
+                cast(List[str], _logprob.tokens).append(token_text)
+                cast(List[float], _logprob.token_logprobs).append(token_logprob)

                 # stub this to work with llm eval harness
                 top_alt_tokens = {"": -1.0, "": -2.0, "": -3.0}  # noqa: F601
-                _logprob.top_logprobs.append(top_alt_tokens)
+                cast(List[Dict[str, float]], _logprob.top_logprobs).append(
+                    top_alt_tokens
+                )

         # For each element in the 'tokens' list, extract the relevant information
         for i, token in enumerate(response_details["tokens"]):
@@ -751,13 +753,15 @@ class Huggingface(BaseLLM):
                 top_alt_tokens[text] = logprob

             # Add the token information to the 'token_info' list
-            _logprob.tokens.append(token_text)
-            _logprob.token_logprobs.append(token_logprob)
-            _logprob.top_logprobs.append(top_alt_tokens)
+            cast(List[str], _logprob.tokens).append(token_text)
+            cast(List[float], _logprob.token_logprobs).append(token_logprob)
+            cast(List[Dict[str, float]], _logprob.top_logprobs).append(
+                top_alt_tokens
+            )

             # Add the text offset of the token
             # This is computed as the sum of the lengths of all previous tokens
-            _logprob.text_offset.append(
+            cast(List[int], _logprob.text_offset).append(
                 sum(len(t["text"]) for t in response_details["tokens"][:i])
             )

```
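The `cast(...)` calls in the diff above are purely for the type checker: `typing.cast` returns its argument unchanged at runtime, but it narrows fields declared as `Optional` lists so that `.append(...)` passes static checks. A minimal sketch of the pattern; the `Logprobs` class here is a simplified stand-in, not litellm's actual type:

```python
from typing import Dict, List, Optional, cast


class Logprobs:
    # Simplified stand-in: the fields are Optional, so calling
    # .append() on them directly fails static type checking.
    def __init__(self) -> None:
        self.tokens: Optional[List[str]] = []
        self.token_logprobs: Optional[List[float]] = []
        self.top_logprobs: Optional[List[Dict[str, float]]] = []


_logprob = Logprobs()
# cast() is a runtime no-op; it just tells the checker these fields
# hold concrete lists at this point in the code.
cast(List[str], _logprob.tokens).append("hello")
cast(List[float], _logprob.token_logprobs).append(-0.42)
cast(List[Dict[str, float]], _logprob.top_logprobs).append({"hello": -0.42})
```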
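One more note on the azure_ai refactor: moving to the openai_like handler means the endpoint is just a configurable base URL. A hedged usage sketch; the deployment name, endpoint, and key below are placeholders, while `model`, `messages`, `api_base`, and `api_key` are standard `litellm.completion` parameters:

```python
import litellm

# Placeholder deployment, endpoint, and key; swap api_base to point the
# same azure_ai model at a different host (e.g. an ai.services.com URL).
response = litellm.completion(
    model="azure_ai/my-deployment",
    api_base="https://example.services.ai.azure.com",
    api_key="placeholder-key",
    messages=[{"role": "user", "content": "hello"}],
)
print(response.choices[0].message.content)
```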