This output parser is useful when you want the model to return multiple named fields. First, the imports:

```python
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
```

Here we define the response schema we want to receive.

```python
response_schemas = [
    ResponseSchema(name="answer", description="answer to the user's question"),
    ResponseSchema(name="source", description="source used to answer the user's question, should be a website."),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
```

From the parser we now get a string containing instructions for how the response should be formatted, and we insert that string into our prompt.

```python
format_instructions = output_parser.get_format_instructions()
prompt = PromptTemplate(
    template="answer the user's question as best as possible.\n{format_instructions}\n{question}",
    input_variables=["question"],
    partial_variables={"format_instructions": format_instructions},
)
```

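If you want to see exactly what gets injected into the prompt, you can print the instructions string. Its exact wording depends on your installed LangChain version, but it asks the model for a markdown-fenced JSON object with the `answer` and `source` keys defined above:

```python
# Inspect the generated format instructions. The exact text varies by
# langchain version; it describes a json-fenced object whose keys match
# the ResponseSchema names ("answer" and "source") defined above.
print(format_instructions)
```
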
We can now use this to format a prompt to send to the language model, and then parse the returned result.

```python
model = OpenAI(temperature=0)
```

```python
_input = prompt.format_prompt(question="what's the capital of france?")
output = model(_input.to_string())
```

```python
output_parser.parse(output)
```

<CodeOutputBlock lang="python">

```
{'answer': 'Paris',
 'source': 'https://www.worldatlas.com/articles/what-is-the-capital-of-france.html'}
```

</CodeOutputBlock>

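`parse()` expects the model's reply to contain the markdown-fenced JSON block described by the format instructions; if the reply drifts from that format, LangChain raises an `OutputParserException`. Here is a minimal defensive sketch; the single-retry fallback is just an illustration, not part of the library:

```python
from langchain.schema import OutputParserException

try:
    result = output_parser.parse(output)
except OutputParserException:
    # The reply didn't contain the expected JSON block. A simple fallback
    # is to re-ask the model once; more robust retry strategies exist.
    output = model(_input.to_string())
    result = output_parser.parse(output)
```
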
And here's an example of using this with a chat model:

```python
chat_model = ChatOpenAI(temperature=0)
```

```python
prompt = ChatPromptTemplate(
    messages=[
        HumanMessagePromptTemplate.from_template(
            "answer the user's question as best as possible.\n{format_instructions}\n{question}"
        )
    ],
    input_variables=["question"],
    partial_variables={"format_instructions": format_instructions},
)
```

```python
_input = prompt.format_prompt(question="what's the capital of france?")
output = chat_model(_input.to_messages())
```

```python
output_parser.parse(output.content)
```

<CodeOutputBlock lang="python">

```
{'answer': 'Paris', 'source': 'https://en.wikipedia.org/wiki/Paris'}
```

</CodeOutputBlock>

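To tie the pieces together, the whole round trip can be wrapped in a small helper. This sketch only reuses the objects defined above; the function name `ask` is our own:

```python
def ask(question: str) -> dict:
    """Format the prompt, call the chat model, and parse the structured reply."""
    messages = prompt.format_prompt(question=question).to_messages()
    reply = chat_model(messages)
    # StructuredOutputParser.parse returns a dict keyed by the schema names.
    return output_parser.parse(reply.content)

ask("what's the capital of france?")
# -> {'answer': '...', 'source': '...'}
```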