From 42926731fd4e4c513982f6fa4249fcdde6f797e0 Mon Sep 17 00:00:00 2001
From: Swapna Lekkala
Date: Tue, 9 Sep 2025 11:22:03 -0700
Subject: [PATCH] fix docs

---
 .../langchain/Llama_Stack_LangChain.ipynb | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs/notebooks/langchain/Llama_Stack_LangChain.ipynb b/docs/notebooks/langchain/Llama_Stack_LangChain.ipynb
index afbffd931..7f875fdde 100644
--- a/docs/notebooks/langchain/Llama_Stack_LangChain.ipynb
+++ b/docs/notebooks/langchain/Llama_Stack_LangChain.ipynb
@@ -17,7 +17,7 @@
     "\n",
     "### What You'll See\n",
     "\n",
-    "1. Setting up LlamaStack server with Together AI provider\n",
+    "1. Setting up LlamaStack server with Fireworks AI provider\n",
     "2. Creating and Querying Vector Stores\n",
     "3. Building RAG chains with LangChain + LLAMAStack\n",
     "4. Querying the chain for relevant information\n",
@@ -76,7 +76,7 @@
     "#### Build and Start LlamaStack Server\n",
     "\n",
     "This section sets up the LlamaStack server with:\n",
-    "- **Together AI** as the inference provider\n",
+    "- **Fireworks AI** as the inference provider\n",
     "- **Sentence Transformers** for embeddings\n",
     "\n",
     "The server runs on `localhost:8321` and provides OpenAI-compatible endpoints."
@@ -205,7 +205,7 @@
     "\n",
     "Create a client connection to the LlamaStack server with API keys for different providers:\n",
     "\n",
-    "- **Fireworks API Key**: For Together AI models\n",
+    "- **Fireworks API Key**: For Fireworks models\n",
     "\n"
    ]
   },
@@ -496,8 +496,8 @@
     "Set up LangChain to use LlamaStack's OpenAI-compatible API:\n",
     "\n",
     "- **Base URL**: Points to LlamaStack's OpenAI endpoint\n",
-    "- **Headers**: Include Together AI API key for model access\n",
-    "- **Model**: Use Meta Llama 3.1 8B model via Together AI"
+    "- **Headers**: Include Fireworks API key for model access\n",
+    "- **Model**: Use the Meta Llama 3.1 8B Instruct model for inference"
    ]
   },
   {
@@ -576,7 +576,7 @@
     "\n",
     "Build a LangChain pipeline that combines:\n",
     "\n",
-    "1. **Vector Search**: Query LlamaStack's vector database\n",
+    "1. **Vector Search**: Query LlamaStack's OpenAI-compatible Vector Store\n",
     "2. **Context Assembly**: Format retrieved documents\n",
     "3. **Prompt Template**: Structure the input for the LLM\n",
     "4. **LLM Generation**: Generate answers using context\n",
@@ -714,7 +714,7 @@
     "\n",
     "- **LlamaStack** for infrastructure (LLM serving + Vector Store)\n",
     "- **LangChain** for orchestration (prompts + chains)\n",
-    "- **Together AI** for high-quality language models\n",
+    "- **Fireworks** for high-quality language models\n",
     "\n",
     "### Key Benefits\n",
     "\n",
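
The markdown cells updated above describe pointing LangChain at LlamaStack's OpenAI-compatible endpoint with a Fireworks API key and then composing a RAG chain over a LlamaStack Vector Store. The following is a minimal sketch of that wiring, not code taken from the notebook; the base URL path, model id, provider-data header name, and vector store id are assumptions.

```python
# Minimal sketch of the setup described in the markdown cells above: LangChain's
# ChatOpenAI pointed at LlamaStack's OpenAI-compatible endpoint with a Fireworks
# API key, plus a small RAG chain over a LlamaStack vector store.
# The base URL path, model id, header name, and vector store id are assumptions.

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from openai import OpenAI

LLAMA_STACK_BASE = "http://localhost:8321/v1/openai/v1"  # assumed OpenAI-compatible path
FIREWORKS_KEY = "YOUR_FIREWORKS_API_KEY"

# OpenAI client used here only for vector store search against LlamaStack.
client = OpenAI(base_url=LLAMA_STACK_BASE, api_key="none")

# Chat model routed through LlamaStack; the Fireworks key is forwarded in a
# provider-data header (header name and model id are assumed).
llm = ChatOpenAI(
    base_url=LLAMA_STACK_BASE,
    api_key="none",
    model="fireworks/llama-v3p1-8b-instruct",
    default_headers={
        "X-LlamaStack-Provider-Data": f'{{"fireworks_api_key": "{FIREWORKS_KEY}"}}'
    },
)


def retrieve_context(question: str, vector_store_id: str = "vs_example") -> str:
    """Search the vector store and join the returned chunks into one context string."""
    results = client.vector_stores.search(vector_store_id=vector_store_id, query=question)
    return "\n\n".join(chunk.text for item in results.data for chunk in item.content)


# Prompt template -> LLM -> string output: the RAG chain the notebook builds.
prompt = ChatPromptTemplate.from_template(
    "Use only the context below to answer the question.\n\n"
    "Context:\n{context}\n\nQuestion: {question}"
)
chain = prompt | llm | StrOutputParser()

if __name__ == "__main__":
    question = "How do I start the LlamaStack server?"
    print(chain.invoke({"context": retrieve_context(question), "question": question}))
```

If the LlamaStack server is instead launched with the Fireworks key already present in its environment, the `default_headers` argument can likely be dropped; the rest is plain LCEL composition.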