mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 11:14:04 +00:00
trying to add docs
This commit is contained in:
parent
0fe8799f94
commit
2cf949990e
834 changed files with 0 additions and 161273 deletions
|
@ -1,46 +0,0 @@
|
|||
```python
|
||||
from langchain.output_parsers import CommaSeparatedListOutputParser
|
||||
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
|
||||
output_parser = CommaSeparatedListOutputParser()
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
format_instructions = output_parser.get_format_instructions()
|
||||
prompt = PromptTemplate(
|
||||
template="List five {subject}.\n{format_instructions}",
|
||||
input_variables=["subject"],
|
||||
partial_variables={"format_instructions": format_instructions}
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
model = OpenAI(temperature=0)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
_input = prompt.format(subject="ice cream flavors")
|
||||
output = model(_input)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
output_parser.parse(output)
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
['Vanilla',
|
||||
'Chocolate',
|
||||
'Strawberry',
|
||||
'Mint Chocolate Chip',
|
||||
'Cookies and Cream']
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
|
@ -1,76 +0,0 @@
|
|||
---
|
||||
sidebar_position: 2
|
||||
---
|
||||
Below we go over the main type of output parser, the `PydanticOutputParser`.
|
||||
|
||||
```python
|
||||
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
|
||||
from langchain.output_parsers import PydanticOutputParser
|
||||
from pydantic import BaseModel, Field, validator
|
||||
from typing import List
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
model_name = 'text-davinci-003'
|
||||
temperature = 0.0
|
||||
model = OpenAI(model_name=model_name, temperature=temperature)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
# Define your desired data structure.
|
||||
class Joke(BaseModel):
|
||||
setup: str = Field(description="question to set up a joke")
|
||||
punchline: str = Field(description="answer to resolve the joke")
|
||||
|
||||
# You can add custom validation logic easily with Pydantic.
|
||||
@validator('setup')
|
||||
def question_ends_with_question_mark(cls, field):
|
||||
if field[-1] != '?':
|
||||
raise ValueError("Badly formed question!")
|
||||
return field
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
# Set up a parser + inject instructions into the prompt template.
|
||||
parser = PydanticOutputParser(pydantic_object=Joke)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
prompt = PromptTemplate(
|
||||
template="Answer the user query.\n{format_instructions}\n{query}\n",
|
||||
input_variables=["query"],
|
||||
partial_variables={"format_instructions": parser.get_format_instructions()}
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
# And a query intended to prompt a language model to populate the data structure.
|
||||
joke_query = "Tell me a joke."
|
||||
_input = prompt.format_prompt(query=joke_query)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
output = model(_input.to_string())
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
parser.parse(output)
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
Joke(setup='Why did the chicken cross the road?', punchline='To get to the other side!')
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
|
@ -1,112 +0,0 @@
|
|||
For this example, we'll use the above Pydantic output parser. Here's what happens if we pass it a result that does not comply with the schema:
|
||||
|
||||
```python
|
||||
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
from langchain.output_parsers import PydanticOutputParser
|
||||
from pydantic import BaseModel, Field, validator
|
||||
from typing import List
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
class Actor(BaseModel):
|
||||
name: str = Field(description="name of an actor")
|
||||
film_names: List[str] = Field(description="list of names of films they starred in")
|
||||
|
||||
actor_query = "Generate the filmography for a random actor."
|
||||
|
||||
parser = PydanticOutputParser(pydantic_object=Actor)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
misformatted = "{'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}"
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
parser.parse(misformatted)
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
---------------------------------------------------------------------------
|
||||
|
||||
JSONDecodeError Traceback (most recent call last)
|
||||
|
||||
File ~/workplace/langchain/langchain/output_parsers/pydantic.py:23, in PydanticOutputParser.parse(self, text)
|
||||
22 json_str = match.group()
|
||||
---> 23 json_object = json.loads(json_str)
|
||||
24 return self.pydantic_object.parse_obj(json_object)
|
||||
|
||||
|
||||
File ~/.pyenv/versions/3.9.1/lib/python3.9/json/__init__.py:346, in loads(s, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)
|
||||
343 if (cls is None and object_hook is None and
|
||||
344 parse_int is None and parse_float is None and
|
||||
345 parse_constant is None and object_pairs_hook is None and not kw):
|
||||
--> 346 return _default_decoder.decode(s)
|
||||
347 if cls is None:
|
||||
|
||||
|
||||
File ~/.pyenv/versions/3.9.1/lib/python3.9/json/decoder.py:337, in JSONDecoder.decode(self, s, _w)
|
||||
333 """Return the Python representation of ``s`` (a ``str`` instance
|
||||
334 containing a JSON document).
|
||||
335
|
||||
336 """
|
||||
--> 337 obj, end = self.raw_decode(s, idx=_w(s, 0).end())
|
||||
338 end = _w(s, end).end()
|
||||
|
||||
|
||||
File ~/.pyenv/versions/3.9.1/lib/python3.9/json/decoder.py:353, in JSONDecoder.raw_decode(self, s, idx)
|
||||
352 try:
|
||||
--> 353 obj, end = self.scan_once(s, idx)
|
||||
354 except StopIteration as err:
|
||||
|
||||
|
||||
JSONDecodeError: Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
|
||||
|
||||
|
||||
During handling of the above exception, another exception occurred:
|
||||
|
||||
|
||||
OutputParserException Traceback (most recent call last)
|
||||
|
||||
Cell In[6], line 1
|
||||
----> 1 parser.parse(misformatted)
|
||||
|
||||
|
||||
File ~/workplace/langchain/langchain/output_parsers/pydantic.py:29, in PydanticOutputParser.parse(self, text)
|
||||
27 name = self.pydantic_object.__name__
|
||||
28 msg = f"Failed to parse {name} from completion {text}. Got: {e}"
|
||||
---> 29 raise OutputParserException(msg)
|
||||
|
||||
|
||||
OutputParserException: Failed to parse Actor from completion {'name': 'Tom Hanks', 'film_names': ['Forrest Gump']}. Got: Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
||||
|
||||
Now we can construct and use an `OutputFixingParser`. This output parser takes as an argument another output parser but also an LLM with which to try to correct any formatting mistakes.
|
||||
|
||||
|
||||
```python
|
||||
from langchain.output_parsers import OutputFixingParser
|
||||
|
||||
new_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI())
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
new_parser.parse(misformatted)
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
Actor(name='Tom Hanks', film_names=['Forrest Gump'])
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
|
@ -1,93 +0,0 @@
|
|||
```python
|
||||
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
|
||||
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
|
||||
from langchain.llms import OpenAI
|
||||
from langchain.chat_models import ChatOpenAI
|
||||
```
|
||||
|
||||
Here we define the response schema we want to receive.
|
||||
|
||||
|
||||
```python
|
||||
response_schemas = [
|
||||
ResponseSchema(name="answer", description="answer to the user's question"),
|
||||
ResponseSchema(name="source", description="source used to answer the user's question, should be a website.")
|
||||
]
|
||||
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
|
||||
```
|
||||
|
||||
We now get a string that contains instructions for how the response should be formatted, and we then insert that into our prompt.
|
||||
|
||||
|
||||
```python
|
||||
format_instructions = output_parser.get_format_instructions()
|
||||
prompt = PromptTemplate(
|
||||
template="answer the user's question as best as possible.\n{format_instructions}\n{question}",
|
||||
input_variables=["question"],
|
||||
partial_variables={"format_instructions": format_instructions}
|
||||
)
|
||||
```
|
||||
|
||||
We can now use this to format a prompt to send to the language model, and then parse the returned result.
|
||||
|
||||
|
||||
```python
|
||||
model = OpenAI(temperature=0)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
_input = prompt.format_prompt(question="what's the capital of france?")
|
||||
output = model(_input.to_string())
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
output_parser.parse(output)
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
{'answer': 'Paris',
|
||||
'source': 'https://www.worldatlas.com/articles/what-is-the-capital-of-france.html'}
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
||||
|
||||
And here's an example of using this in a chat model
|
||||
|
||||
|
||||
```python
|
||||
chat_model = ChatOpenAI(temperature=0)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
prompt = ChatPromptTemplate(
|
||||
messages=[
|
||||
HumanMessagePromptTemplate.from_template("answer the user's question as best as possible.\n{format_instructions}\n{question}")
|
||||
],
|
||||
input_variables=["question"],
|
||||
partial_variables={"format_instructions": format_instructions}
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
_input = prompt.format_prompt(question="what's the capital of france?")
|
||||
output = chat_model(_input.to_messages())
|
||||
```
|
||||
|
||||
|
||||
```python
|
||||
output_parser.parse(output.content)
|
||||
```
|
||||
|
||||
<CodeOutputBlock lang="python">
|
||||
|
||||
```
|
||||
{'answer': 'Paris', 'source': 'https://en.wikipedia.org/wiki/Paris'}
|
||||
```
|
||||
|
||||
</CodeOutputBlock>
|
Loading…
Add table
Add a link
Reference in a new issue