diff --git a/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb b/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb index 0b15adaff..78d51f8fc 100644 --- a/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb +++ b/docs/notebooks/Llama_Stack_Agent_Workflows.ipynb @@ -43,9 +43,566 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 114, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Not in Google Colab environment\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Warning: `bwrap` is not available. Code interpreter tool will not work correctly.\n" + ] + }, + { + "data": { + "text/html": [ + "
Using config fireworks:\n",
+ "
\n"
+ ],
+ "text/plain": [
+ "Using config \u001b[34mfireworks\u001b[0m:\n"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/html": [
+ "apis:\n", + "- agents\n", + "- datasetio\n", + "- eval\n", + "- inference\n", + "- safety\n", + "- scoring\n", + "- telemetry\n", + "- tool_runtime\n", + "- vector_io\n", + "benchmarks: []\n", + "container_image: null\n", + "datasets: []\n", + "image_name: fireworks\n", + "metadata_store:\n", + " db_path: /Users/xiyan/.llama/distributions/fireworks/registry.db\n", + " namespace: null\n", + " type: sqlite\n", + "models:\n", + "- metadata: {}\n", + " model_id: accounts/fireworks/models/llama-v3p1-8b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-8b-instruct\n", + "- metadata: {}\n", + " model_id: meta-llama/Llama-3.1-8B-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-8b-instruct\n", + "- metadata: {}\n", + " model_id: accounts/fireworks/models/llama-v3p1-70b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-70b-instruct\n", + "- metadata: {}\n", + " model_id: meta-llama/Llama-3.1-70B-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-70b-instruct\n", + "- metadata: {}\n", + " model_id: accounts/fireworks/models/llama-v3p1-405b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-405b-instruct\n", + "- metadata: {}\n", + " model_id: meta-llama/Llama-3.1-405B-Instruct-FP8\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-405b-instruct\n", + "- metadata: {}\n", + " model_id: accounts/fireworks/models/llama-v3p2-1b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct\n", + "- metadata: {}\n", + " model_id: meta-llama/Llama-3.2-1B-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct\n", + "- metadata: {}\n", + " model_id: accounts/fireworks/models/llama-v3p2-3b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-3b-instruct\n", + "- metadata: {}\n", + " model_id: meta-llama/Llama-3.2-3B-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-3b-instruct\n", + "- metadata: {}\n", + " model_id: accounts/fireworks/models/llama-v3p2-11b-vision-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: 
accounts/fireworks/models/llama-v3p2-11b-vision-instruct\n", + "- metadata: {}\n", + " model_id: meta-llama/Llama-3.2-11B-Vision-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-11b-vision-instruct\n", + "- metadata: {}\n", + " model_id: accounts/fireworks/models/llama-v3p2-90b-vision-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-90b-vision-instruct\n", + "- metadata: {}\n", + " model_id: meta-llama/Llama-3.2-90B-Vision-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-90b-vision-instruct\n", + "- metadata: {}\n", + " model_id: accounts/fireworks/models/llama-v3p3-70b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p3-70b-instruct\n", + "- metadata: {}\n", + " model_id: meta-llama/Llama-3.3-70B-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p3-70b-instruct\n", + "- metadata: {}\n", + " model_id: accounts/fireworks/models/llama-guard-3-8b\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-guard-3-8b\n", + "- metadata: {}\n", + " model_id: meta-llama/Llama-Guard-3-8B\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-guard-3-8b\n", + "- metadata: {}\n", + " model_id: accounts/fireworks/models/llama-guard-3-11b-vision\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-guard-3-11b-vision\n", + "- metadata: {}\n", + " model_id: meta-llama/Llama-Guard-3-11B-Vision\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-guard-3-11b-vision\n", + "- metadata:\n", + " context_length: 8192\n", + " embedding_dimension: 768\n", + " model_id: nomic-ai/nomic-embed-text-v1.5\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - embedding\n", + " provider_id: fireworks\n", + " provider_model_id: nomic-ai/nomic-embed-text-v1.5\n", + "- metadata:\n", + " embedding_dimension: 384\n", + " model_id: all-MiniLM-L6-v2\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - embedding\n", + " provider_id: sentence-transformers\n", + " provider_model_id: null\n", + "providers:\n", + " agents:\n", + " - config:\n", + " persistence_store:\n", + " db_path: /Users/xiyan/.llama/distributions/fireworks/agents_store.db\n", + " namespace: null\n", + " type: sqlite\n", + " provider_id: meta-reference\n", + " provider_type: inline::meta-reference\n", + " datasetio:\n", + " - 
config: {}\n", + " provider_id: huggingface\n", + " provider_type: remote::huggingface\n", + " - config: {}\n", + " provider_id: localfs\n", + " provider_type: inline::localfs\n", + " eval:\n", + " - config: {}\n", + " provider_id: meta-reference\n", + " provider_type: inline::meta-reference\n", + " inference:\n", + " - config:\n", + " api_key: '********'\n", + " url: https://api.fireworks.ai/inference/v1\n", + " provider_id: fireworks\n", + " provider_type: remote::fireworks\n", + " - config: {}\n", + " provider_id: sentence-transformers\n", + " provider_type: inline::sentence-transformers\n", + " safety:\n", + " - config: {}\n", + " provider_id: llama-guard\n", + " provider_type: inline::llama-guard\n", + " scoring:\n", + " - config: {}\n", + " provider_id: basic\n", + " provider_type: inline::basic\n", + " - config: {}\n", + " provider_id: llm-as-judge\n", + " provider_type: inline::llm-as-judge\n", + " - config:\n", + " openai_api_key: '********'\n", + " provider_id: braintrust\n", + " provider_type: inline::braintrust\n", + " telemetry:\n", + " - config:\n", + " service_name: llama-stack\n", + " sinks: sqlite\n", + " sqlite_db_path: /Users/xiyan/.llama/distributions/fireworks/trace_store.db\n", + " provider_id: meta-reference\n", + " provider_type: inline::meta-reference\n", + " tool_runtime:\n", + " - config:\n", + " api_key: '********'\n", + " max_results: 3\n", + " provider_id: brave-search\n", + " provider_type: remote::brave-search\n", + " - config:\n", + " api_key: '********'\n", + " max_results: 3\n", + " provider_id: tavily-search\n", + " provider_type: remote::tavily-search\n", + " - config: {}\n", + " provider_id: code-interpreter\n", + " provider_type: inline::code-interpreter\n", + " - config: {}\n", + " provider_id: rag-runtime\n", + " provider_type: inline::rag-runtime\n", + " - config: {}\n", + " provider_id: model-context-protocol\n", + " provider_type: remote::model-context-protocol\n", + " vector_io:\n", + " - config:\n", + " kvstore:\n", + " db_path: /Users/xiyan/.llama/distributions/fireworks/faiss_store.db\n", + " namespace: null\n", + " type: sqlite\n", + " provider_id: faiss\n", + " provider_type: inline::faiss\n", + "scoring_fns: []\n", + "server:\n", + " port: 8321\n", + " tls_certfile: null\n", + " tls_keyfile: null\n", + "shields:\n", + "- params: null\n", + " provider_id: null\n", + " provider_shield_id: null\n", + " shield_id: meta-llama/Llama-Guard-3-8B\n", + "tool_groups:\n", + "- args: null\n", + " mcp_endpoint: null\n", + " provider_id: tavily-search\n", + " toolgroup_id: builtin::websearch\n", + "- args: null\n", + " mcp_endpoint: null\n", + " provider_id: rag-runtime\n", + " toolgroup_id: builtin::rag\n", + "- args: null\n", + " mcp_endpoint: null\n", + " provider_id: code-interpreter\n", + " toolgroup_id: builtin::code_interpreter\n", + "vector_dbs: []\n", + "version: '2'\n", + "\n", + "\n" + ], + "text/plain": [ + "apis:\n", + "- agents\n", + "- datasetio\n", + "- eval\n", + "- inference\n", + "- safety\n", + "- scoring\n", + "- telemetry\n", + "- tool_runtime\n", + "- vector_io\n", + "benchmarks: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n", + "container_image: null\n", + "datasets: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n", + "image_name: fireworks\n", + "metadata_store:\n", + " db_path: \u001b[35m/Users/xiyan/.llama/distributions/fireworks/\u001b[0m\u001b[95mregistry.db\u001b[0m\n", + " namespace: null\n", + " type: sqlite\n", + "models:\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: 
accounts/fireworks/models/llama-v3p1-8b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-8b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: meta-llama/Llama-\u001b[1;36m3.1\u001b[0m-8B-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-8b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: accounts/fireworks/models/llama-v3p1-70b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-70b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: meta-llama/Llama-\u001b[1;36m3.1\u001b[0m-70B-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-70b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: accounts/fireworks/models/llama-v3p1-405b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-405b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: meta-llama/Llama-\u001b[1;36m3.1\u001b[0m-405B-Instruct-FP8\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p1-405b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: accounts/fireworks/models/llama-v3p2-1b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-1B-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-1b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: accounts/fireworks/models/llama-v3p2-3b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-3b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-3B-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-3b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: accounts/fireworks/models/llama-v3p2-11b-vision-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: 
accounts/fireworks/models/llama-v3p2-11b-vision-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-11B-Vision-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-11b-vision-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: accounts/fireworks/models/llama-v3p2-90b-vision-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-90b-vision-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-90B-Vision-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p2-90b-vision-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: accounts/fireworks/models/llama-v3p3-70b-instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p3-70b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: meta-llama/Llama-\u001b[1;36m3.3\u001b[0m-70B-Instruct\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-v3p3-70b-instruct\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: accounts/fireworks/models/llama-guard-\u001b[1;36m3\u001b[0m-8b\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-guard-\u001b[1;36m3\u001b[0m-8b\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: meta-llama/Llama-Guard-\u001b[1;36m3\u001b[0m-8B\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-guard-\u001b[1;36m3\u001b[0m-8b\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: accounts/fireworks/models/llama-guard-\u001b[1;36m3\u001b[0m-11b-vision\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-guard-\u001b[1;36m3\u001b[0m-11b-vision\n", + "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " model_id: meta-llama/Llama-Guard-\u001b[1;36m3\u001b[0m-11B-Vision\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - llm\n", + " provider_id: fireworks\n", + " provider_model_id: accounts/fireworks/models/llama-guard-\u001b[1;36m3\u001b[0m-11b-vision\n", + "- metadata:\n", + " context_length: \u001b[1;36m8192\u001b[0m\n", + " embedding_dimension: \u001b[1;36m768\u001b[0m\n", + " model_id: nomic-ai/nomic-embed-text-v1.\u001b[1;36m5\u001b[0m\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - embedding\n", + " provider_id: fireworks\n", + " 
provider_model_id: nomic-ai/nomic-embed-text-v1.\u001b[1;36m5\u001b[0m\n", + "- metadata:\n", + " embedding_dimension: \u001b[1;36m384\u001b[0m\n", + " model_id: all-MiniLM-L6-v2\n", + " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", + " - embedding\n", + " provider_id: sentence-transformers\n", + " provider_model_id: null\n", + "providers:\n", + " agents:\n", + " - config:\n", + " persistence_store:\n", + " db_path: \u001b[35m/Users/xiyan/.llama/distributions/fireworks/\u001b[0m\u001b[95magents_store.db\u001b[0m\n", + " namespace: null\n", + " type: sqlite\n", + " provider_id: meta-reference\n", + " provider_type: inline::meta-reference\n", + " datasetio:\n", + " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " provider_id: huggingface\n", + " provider_type: remote::huggingface\n", + " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " provider_id: localfs\n", + " provider_type: inline::localfs\n", + " eval:\n", + " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " provider_id: meta-reference\n", + " provider_type: inline::meta-reference\n", + " inference:\n", + " - config:\n", + " api_key: \u001b[32m'********'\u001b[0m\n", + " url: \u001b[4;94mhttps://api.fireworks.ai/inference/v1\u001b[0m\n", + " provider_id: fireworks\n", + " provider_type: remot\u001b[1;92me::f\u001b[0mireworks\n", + " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " provider_id: sentence-transformers\n", + " provider_type: inline::sentence-transformers\n", + " safety:\n", + " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " provider_id: llama-guard\n", + " provider_type: inline::llama-guard\n", + " scoring:\n", + " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " provider_id: basic\n", + " provider_type: inlin\u001b[1;92me::ba\u001b[0msic\n", + " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " provider_id: llm-as-judge\n", + " provider_type: inline::llm-as-judge\n", + " - config:\n", + " openai_api_key: \u001b[32m'********'\u001b[0m\n", + " provider_id: braintrust\n", + " provider_type: inlin\u001b[1;92me::b\u001b[0mraintrust\n", + " telemetry:\n", + " - config:\n", + " service_name: llama-stack\n", + " sinks: sqlite\n", + " sqlite_db_path: \u001b[35m/Users/xiyan/.llama/distributions/fireworks/\u001b[0m\u001b[95mtrace_store.db\u001b[0m\n", + " provider_id: meta-reference\n", + " provider_type: inline::meta-reference\n", + " tool_runtime:\n", + " - config:\n", + " api_key: \u001b[32m'********'\u001b[0m\n", + " max_results: \u001b[1;36m3\u001b[0m\n", + " provider_id: brave-search\n", + " provider_type: remot\u001b[1;92me::b\u001b[0mrave-search\n", + " - config:\n", + " api_key: \u001b[32m'********'\u001b[0m\n", + " max_results: \u001b[1;36m3\u001b[0m\n", + " provider_id: tavily-search\n", + " provider_type: remote::tavily-search\n", + " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " provider_id: code-interpreter\n", + " provider_type: inlin\u001b[1;92me::c\u001b[0mode-interpreter\n", + " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " provider_id: rag-runtime\n", + " provider_type: inline::rag-runtime\n", + " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", + " provider_id: model-context-protocol\n", + " provider_type: remote::model-context-protocol\n", + " vector_io:\n", + " - config:\n", + " kvstore:\n", + " db_path: \u001b[35m/Users/xiyan/.llama/distributions/fireworks/\u001b[0m\u001b[95mfaiss_store.db\u001b[0m\n", + " namespace: null\n", + " type: sqlite\n", + " provider_id: faiss\n", + " provider_type: 
inlin\u001b[1;92me::fa\u001b[0miss\n", + "scoring_fns: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n", + "server:\n", + " port: \u001b[1;36m8321\u001b[0m\n", + " tls_certfile: null\n", + " tls_keyfile: null\n", + "shields:\n", + "- params: null\n", + " provider_id: null\n", + " provider_shield_id: null\n", + " shield_id: meta-llama/Llama-Guard-\u001b[1;36m3\u001b[0m-8B\n", + "tool_groups:\n", + "- args: null\n", + " mcp_endpoint: null\n", + " provider_id: tavily-search\n", + " toolgroup_id: builtin::websearch\n", + "- args: null\n", + " mcp_endpoint: null\n", + " provider_id: rag-runtime\n", + " toolgroup_id: builtin::rag\n", + "- args: null\n", + " mcp_endpoint: null\n", + " provider_id: code-interpreter\n", + " toolgroup_id: builtin::code_interpreter\n", + "vector_dbs: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n", + "version: \u001b[32m'2'\u001b[0m\n", + "\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "from llama_stack_client import LlamaStackClient\n", "from llama_stack.distribution.library_client import LlamaStackAsLibraryClient\n", @@ -57,22 +614,12 @@ "from pydantic import BaseModel\n", "import rich\n", "import os\n", - "\n", "try:\n", " from google.colab import userdata\n", " os.environ['FIREWORKS_API_KEY'] = userdata.get('FIREWORKS_API_KEY')\n", "except ImportError:\n", " print(\"Not in Google Colab environment\")\n", "\n", - "for key in ['FIREWORKS_API_KEY']:\n", - " try:\n", - " api_key = os.environ[key]\n", - " if not api_key:\n", - " raise ValueError(f\"{key} environment variable is empty\")\n", - " except KeyError:\n", - " api_key = input(f\"{key} environment variable is not set. Please enter your API key: \")\n", - " os.environ[key] = api_key\n", - "\n", "client = LlamaStackAsLibraryClient(\"fireworks\", provider_data = {\"fireworks_api_key\": os.environ['FIREWORKS_API_KEY']})\n", "_ = client.initialize()\n", "\n", @@ -1569,7 +2116,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 110, "metadata": {}, "outputs": [], "source": [ @@ -1670,7 +2217,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 113, "metadata": {}, "outputs": [ { @@ -1680,22 +2227,25 @@ "```python\n", "class MinStack:\n", " def __init__(self):\n", - " self.main_stack = []\n", + " self.stack = []\n", " self.min_stack = []\n", - "\n", + " \n", " def push(self, x: int) -> None:\n", - " self.main_stack.append(x)\n", + " self.stack.append(x)\n", " if not self.min_stack or x <= self.min_stack[-1]:\n", " self.min_stack.append(x)\n", - "\n", + " \n", " def pop(self) -> None:\n", - " if self.main_stack:\n", - " if self.main_stack[-1] == self.min_stack[-1]:\n", + " if self.stack:\n", + " if self.stack[-1] == self.min_stack[-1]:\n", " self.min_stack.pop()\n", - " self.main_stack.pop()\n", - "\n", + " self.stack.pop()\n", + " \n", " def getMin(self) -> int:\n", - " return self.min_stack[-1]\n", + " if self.min_stack:\n", + " return self.min_stack[-1]\n", + " else:\n", + " return None\n", "```\n" ] } @@ -1709,7 +2259,8 @@ "All operations should be O(1).\n", "\"\"\"\n", "\n", - "print(generator_evaluator_workflow(coding_task)[\"response\"])" + "output = generator_evaluator_workflow(coding_task)\n", + "print(output[\"response\"])" ] }, {
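The workflow's final answer above returns `None` from `getMin` despite the `-> int` annotation. For readers who want to run the generated data structure directly, here is a self-contained sketch of the same two-stack technique — an illustration, not the notebook's verbatim output — with the empty-stack case raised as an error instead:

```python
# Standalone sketch of the two-stack min-stack the workflow generates above.
# It differs from the notebook output in one detail: getMin raises on an
# empty stack instead of returning None, keeping the `-> int` annotation honest.

class MinStack:
    def __init__(self) -> None:
        self.stack: list[int] = []      # all pushed values
        self.min_stack: list[int] = []  # running minimums; top is the current min

    def push(self, x: int) -> None:
        self.stack.append(x)
        # Push onto min_stack only when x ties or beats the current minimum.
        if not self.min_stack or x <= self.min_stack[-1]:
            self.min_stack.append(x)

    def pop(self) -> None:
        if self.stack:
            # If the value leaving the stack is the current minimum, retire it.
            if self.stack[-1] == self.min_stack[-1]:
                self.min_stack.pop()
            self.stack.pop()

    def getMin(self) -> int:
        if not self.min_stack:
            raise IndexError("getMin() on empty stack")
        return self.min_stack[-1]


if __name__ == "__main__":
    s = MinStack()
    for v in (3, 5, 2, 2, 4):
        s.push(v)
    assert s.getMin() == 2
    s.pop()  # removes 4
    s.pop()  # removes one 2; the duplicate keeps the minimum at 2
    assert s.getMin() == 2
    s.pop()  # removes the other 2
    assert s.getMin() == 3
```

The auxiliary `min_stack` grows only when a new value ties or beats the current minimum, so `push`, `pop`, and `getMin` all stay O(1); pushing duplicates (`<=` rather than `<`) is what keeps the minimum correct when equal values are popped.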
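`generator_evaluator_workflow` itself is defined earlier in the notebook and is not part of this hunk. As a rough sketch of the pattern it implements — one model drafts a solution, a second critiques it, and the draft is regenerated with the feedback until the evaluator accepts — the following minimal version may help; the callable names, the `PASS` convention, and `max_rounds` are illustrative assumptions, not the notebook's exact code:

```python
# Hypothetical sketch of a generator-evaluator loop. The generate/evaluate
# callables stand in for agent turns; the PASS protocol and round limit are
# assumptions for illustration.
from typing import Callable, TypedDict


class WorkflowResult(TypedDict):
    response: str
    rounds: int


def generator_evaluator(
    task: str,
    generate: Callable[[str], str],       # e.g. a generator agent turn
    evaluate: Callable[[str, str], str],  # returns feedback, or "PASS"
    max_rounds: int = 3,
) -> WorkflowResult:
    """Draft a solution, have a second model critique it, and regenerate
    with the feedback folded in until the evaluator accepts."""
    prompt = task
    draft = generate(prompt)
    for round_no in range(1, max_rounds + 1):
        verdict = evaluate(task, draft)
        if verdict.strip().upper().startswith("PASS"):
            return {"response": draft, "rounds": round_no}
        # Fold the critique back into the next generation prompt.
        prompt = f"{task}\n\nPrevious attempt:\n{draft}\n\nFeedback:\n{verdict}"
        draft = generate(prompt)
    return {"response": draft, "rounds": max_rounds}
```

The notebook's version likewise returns a dict with a `"response"` key, which is why the final cell can index `output["response"]`.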