llama-stack-mirror/docs/source/building_applications/langchain_langgraph/langgraph-agent-add.py
gabemontero 20fd5ff54c docs: add an AI frameworks with common OpenAI API compatibility section to AI Application Examples
This change attempts to build off of the existing "Agents vs. OpenAI Responses API" section of "AI Application Examples", and get into how several of the popular AI frameworks provide some form of OpenAI API compatibility, and how this fact can allow one to deploy such application on Llama Stack.

This change also
- introduces a simple LangChain/LangGraph example that runs on Llama Stack via its OpenAI-compatible API
- circles back to the Responses API, and introduces a page of external references to examples
- makes it clear that other OpenAI API compatible AI frameworks can be added as the community has time to dive into them.
2025-09-18 17:32:33 -04:00

70 lines
2 KiB
Python

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from langgraph.graph import StateGraph, END
from langchain_core.messages import HumanMessage, ToolMessage
from langchain.agents import tool
from langchain_openai import ChatOpenAI
from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages
# --- Tool ---
# The @tool decorator registers this function as a LangChain tool. Its
# docstring doubles as the tool description shown to the LLM at runtime,
# so the docstring text is part of the program's behavior — keep it as-is.
@tool
def add_numbers(x: int, y: int) -> int:
    """Add two integers together."""
    return x + y


# The tool list handed to bind_tools() below, enabling function-calling.
tools = [add_numbers]
# --- LLM that supports function-calling ---
# Llama Stack exposes an OpenAI-compatible endpoint, so ChatOpenAI can talk
# to it directly; the API key is a placeholder ("none") because the local
# server does not authenticate.
_chat_model = ChatOpenAI(
    model="ollama/llama3.2:3b-instruct-fp16",
    openai_api_key="none",
    openai_api_base="http://localhost:8321/v1/openai/v1",
)
# Attach the tool schemas so the model may emit tool calls.
llm = _chat_model.bind_tools(tools)
# --- Node that runs the agent ---
def agent_node(state):
    """Invoke the tool-bound LLM on the conversation held in *state*.

    Expects ``state["messages"]`` to be a list of chat messages; if a
    ``"scratchpad"`` entry is present, its agent/observation pairs are
    rendered into tool messages and appended before the model call.

    Returns a dict with the extended ``"messages"`` list (model response
    appended) and the raw response under ``"intermediate_step"`` so the
    tool node can inspect any tool calls.
    """
    # Copy before extending: the original used ``messages += ...`` which,
    # for a list, extends state["messages"] in place and silently mutates
    # the shared graph state.
    messages = list(state["messages"])
    if "scratchpad" in state:
        messages += format_to_openai_tool_messages(state["scratchpad"])
    response = llm.invoke(messages)
    print(f"LLM returned Chat Completion object: {response.response_metadata}")
    return {
        "messages": messages + [response],
        "intermediate_step": response,
    }
# --- Node that executes tool call ---
def tool_node(state):
    """Execute the first tool call requested by the agent and append the
    result to the conversation as a ToolMessage."""
    call = state["intermediate_step"].tool_calls[0]
    output = add_numbers.invoke(call["args"])
    tool_msg = ToolMessage(tool_call_id=call["id"], content=str(output))
    return {"messages": state["messages"] + [tool_msg]}
# --- Build LangGraph ---
# Wire a two-step pipeline: the agent node always hands off to the tool
# node, which terminates the run.
graph = StateGraph(dict)
for node_name, node_fn in (("agent", agent_node), ("tool", tool_node)):
    graph.add_node(node_name, node_fn)
graph.set_entry_point("agent")
graph.add_edge("agent", "tool")
graph.add_edge("tool", END)
compiled_graph = graph.compile()
# --- Run it ---
initial_state = {"messages": [HumanMessage(content="What is 16 plus 9?")]}
final_state = compiled_graph.invoke(initial_state)

# --- Output ---
# Dump every message accumulated in the final conversation state.
for msg in final_state["messages"]:
    print(f"{msg.type.upper()}: {msg.content}")