From 14c23dd5de112b9857d740ef7184b569017a5388 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Sun, 5 Oct 2025 13:21:09 -0700 Subject: [PATCH] Clean up backup files --- llama_stack/distributions/dell/run.yaml.bak | 121 --------- .../meta-reference-gpu/run.yaml.bak | 128 --------- llama_stack/distributions/nvidia/run.yaml.bak | 105 -------- .../distributions/open-benchmark/run.yaml.bak | 241 ----------------- .../distributions/starter-gpu/run.yaml.bak | 248 ------------------ .../distributions/watsonx/run.yaml.bak | 219 ---------------- practice/check_test_cases.py | 0 practice/coin_change_practice.py | 184 ------------- practice/hangman_guesser.py | 243 ----------------- practice/rotate_image_practice.py | 140 ---------- 10 files changed, 1629 deletions(-) delete mode 100644 llama_stack/distributions/dell/run.yaml.bak delete mode 100644 llama_stack/distributions/meta-reference-gpu/run.yaml.bak delete mode 100644 llama_stack/distributions/nvidia/run.yaml.bak delete mode 100644 llama_stack/distributions/open-benchmark/run.yaml.bak delete mode 100644 llama_stack/distributions/starter-gpu/run.yaml.bak delete mode 100644 llama_stack/distributions/watsonx/run.yaml.bak delete mode 100644 practice/check_test_cases.py delete mode 100644 practice/coin_change_practice.py delete mode 100644 practice/hangman_guesser.py delete mode 100644 practice/rotate_image_practice.py diff --git a/llama_stack/distributions/dell/run.yaml.bak b/llama_stack/distributions/dell/run.yaml.bak deleted file mode 100644 index 322cd51d1..000000000 --- a/llama_stack/distributions/dell/run.yaml.bak +++ /dev/null @@ -1,121 +0,0 @@ -version: 2 -image_name: dell -apis: -- agents -- datasetio -- eval -- inference -- safety -- scoring -- telemetry -- tool_runtime -- vector_io -providers: - inference: - - provider_id: tgi0 - provider_type: remote::tgi - config: - url: ${env.DEH_URL} - - provider_id: sentence-transformers - provider_type: inline::sentence-transformers - vector_io: - - provider_id: chromadb - provider_type: remote::chromadb - config: - url: ${env.CHROMADB_URL:=} - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell/}/chroma_remote_registry.db - safety: - - provider_id: llama-guard - provider_type: inline::llama-guard - config: - excluded_categories: [] - agents: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - persistence_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/agents_store.db - responses_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/responses_store.db - telemetry: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - service_name: "${env.OTEL_SERVICE_NAME:=\u200B}" - sinks: ${env.TELEMETRY_SINKS:=sqlite} - sqlite_db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/trace_store.db - otel_exporter_otlp_endpoint: ${env.OTEL_EXPORTER_OTLP_ENDPOINT:=} - eval: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/meta_reference_eval.db - datasetio: - - provider_id: huggingface - provider_type: remote::huggingface - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/huggingface_datasetio.db - - provider_id: localfs - provider_type: inline::localfs - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/localfs_datasetio.db - 
scoring: - - provider_id: basic - provider_type: inline::basic - - provider_id: llm-as-judge - provider_type: inline::llm-as-judge - - provider_id: braintrust - provider_type: inline::braintrust - config: - openai_api_key: ${env.OPENAI_API_KEY:=} - tool_runtime: - - provider_id: brave-search - provider_type: remote::brave-search - config: - api_key: ${env.BRAVE_SEARCH_API_KEY:=} - max_results: 3 - - provider_id: tavily-search - provider_type: remote::tavily-search - config: - api_key: ${env.TAVILY_SEARCH_API_KEY:=} - max_results: 3 - - provider_id: rag-runtime - provider_type: inline::rag-runtime -metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/registry.db -inference_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/dell}/inference_store.db -models: -- metadata: {} - model_id: ${env.INFERENCE_MODEL} - provider_id: tgi0 - model_type: llm -- metadata: - embedding_dimension: 384 - model_id: all-MiniLM-L6-v2 - provider_id: sentence-transformers - model_type: embedding -shields: [] -vector_dbs: [] -datasets: [] -scoring_fns: [] -benchmarks: [] -tool_groups: -- toolgroup_id: builtin::websearch - provider_id: brave-search -- toolgroup_id: builtin::rag - provider_id: rag-runtime -server: - port: 8321 diff --git a/llama_stack/distributions/meta-reference-gpu/run.yaml.bak b/llama_stack/distributions/meta-reference-gpu/run.yaml.bak deleted file mode 100644 index ab53f3b26..000000000 --- a/llama_stack/distributions/meta-reference-gpu/run.yaml.bak +++ /dev/null @@ -1,128 +0,0 @@ -version: 2 -image_name: meta-reference-gpu -apis: -- agents -- datasetio -- eval -- inference -- safety -- scoring -- telemetry -- tool_runtime -- vector_io -providers: - inference: - - provider_id: meta-reference-inference - provider_type: inline::meta-reference - config: - model: ${env.INFERENCE_MODEL} - checkpoint_dir: ${env.INFERENCE_CHECKPOINT_DIR:=null} - quantization: - type: ${env.QUANTIZATION_TYPE:=bf16} - model_parallel_size: ${env.MODEL_PARALLEL_SIZE:=0} - max_batch_size: ${env.MAX_BATCH_SIZE:=1} - max_seq_len: ${env.MAX_SEQ_LEN:=4096} - - provider_id: sentence-transformers - provider_type: inline::sentence-transformers - vector_io: - - provider_id: faiss - provider_type: inline::faiss - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/faiss_store.db - safety: - - provider_id: llama-guard - provider_type: inline::llama-guard - config: - excluded_categories: [] - agents: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - persistence_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/agents_store.db - responses_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/responses_store.db - telemetry: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - service_name: "${env.OTEL_SERVICE_NAME:=\u200B}" - sinks: ${env.TELEMETRY_SINKS:=sqlite} - sqlite_db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/trace_store.db - otel_exporter_otlp_endpoint: ${env.OTEL_EXPORTER_OTLP_ENDPOINT:=} - eval: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/meta_reference_eval.db - datasetio: - - provider_id: huggingface - provider_type: remote::huggingface - config: - kvstore: - type: sqlite 
- db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/huggingface_datasetio.db - - provider_id: localfs - provider_type: inline::localfs - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/localfs_datasetio.db - scoring: - - provider_id: basic - provider_type: inline::basic - - provider_id: llm-as-judge - provider_type: inline::llm-as-judge - - provider_id: braintrust - provider_type: inline::braintrust - config: - openai_api_key: ${env.OPENAI_API_KEY:=} - tool_runtime: - - provider_id: brave-search - provider_type: remote::brave-search - config: - api_key: ${env.BRAVE_SEARCH_API_KEY:=} - max_results: 3 - - provider_id: tavily-search - provider_type: remote::tavily-search - config: - api_key: ${env.TAVILY_SEARCH_API_KEY:=} - max_results: 3 - - provider_id: rag-runtime - provider_type: inline::rag-runtime - - provider_id: model-context-protocol - provider_type: remote::model-context-protocol -metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/registry.db -inference_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/meta-reference-gpu}/inference_store.db -models: -- metadata: {} - model_id: ${env.INFERENCE_MODEL} - provider_id: meta-reference-inference - model_type: llm -- metadata: - embedding_dimension: 384 - model_id: all-MiniLM-L6-v2 - provider_id: sentence-transformers - model_type: embedding -shields: [] -vector_dbs: [] -datasets: [] -scoring_fns: [] -benchmarks: [] -tool_groups: -- toolgroup_id: builtin::websearch - provider_id: tavily-search -- toolgroup_id: builtin::rag - provider_id: rag-runtime -server: - port: 8321 diff --git a/llama_stack/distributions/nvidia/run.yaml.bak b/llama_stack/distributions/nvidia/run.yaml.bak deleted file mode 100644 index 40913cf39..000000000 --- a/llama_stack/distributions/nvidia/run.yaml.bak +++ /dev/null @@ -1,105 +0,0 @@ -version: 2 -image_name: nvidia -apis: -- agents -- datasetio -- eval -- files -- inference -- post_training -- safety -- scoring -- telemetry -- tool_runtime -- vector_io -providers: - inference: - - provider_id: nvidia - provider_type: remote::nvidia - config: - url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} - api_key: ${env.NVIDIA_API_KEY:=} - append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} - vector_io: - - provider_id: faiss - provider_type: inline::faiss - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/faiss_store.db - safety: - - provider_id: nvidia - provider_type: remote::nvidia - config: - guardrails_service_url: ${env.GUARDRAILS_SERVICE_URL:=http://localhost:7331} - config_id: ${env.NVIDIA_GUARDRAILS_CONFIG_ID:=self-check} - agents: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - persistence_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/agents_store.db - responses_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/responses_store.db - telemetry: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - service_name: "${env.OTEL_SERVICE_NAME:=\u200B}" - sinks: ${env.TELEMETRY_SINKS:=sqlite} - sqlite_db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/trace_store.db - otel_exporter_otlp_endpoint: ${env.OTEL_EXPORTER_OTLP_ENDPOINT:=} - eval: - - provider_id: nvidia - provider_type: remote::nvidia - config: - evaluator_url: 
${env.NVIDIA_EVALUATOR_URL:=http://localhost:7331} - post_training: - - provider_id: nvidia - provider_type: remote::nvidia - config: - api_key: ${env.NVIDIA_API_KEY:=} - dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:=default} - project_id: ${env.NVIDIA_PROJECT_ID:=test-project} - customizer_url: ${env.NVIDIA_CUSTOMIZER_URL:=http://nemo.test} - datasetio: - - provider_id: nvidia - provider_type: remote::nvidia - config: - api_key: ${env.NVIDIA_API_KEY:=} - dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:=default} - project_id: ${env.NVIDIA_PROJECT_ID:=test-project} - datasets_url: ${env.NVIDIA_DATASETS_URL:=http://nemo.test} - scoring: - - provider_id: basic - provider_type: inline::basic - tool_runtime: - - provider_id: rag-runtime - provider_type: inline::rag-runtime - files: - - provider_id: meta-reference-files - provider_type: inline::localfs - config: - storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/nvidia/files} - metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/files_metadata.db -metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/registry.db -inference_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/nvidia}/inference_store.db -models: [] -shields: [] -vector_dbs: [] -datasets: [] -scoring_fns: [] -benchmarks: [] -tool_groups: -- toolgroup_id: builtin::rag - provider_id: rag-runtime -server: - port: 8321 diff --git a/llama_stack/distributions/open-benchmark/run.yaml.bak b/llama_stack/distributions/open-benchmark/run.yaml.bak deleted file mode 100644 index 68efa6e89..000000000 --- a/llama_stack/distributions/open-benchmark/run.yaml.bak +++ /dev/null @@ -1,241 +0,0 @@ -version: 2 -image_name: open-benchmark -apis: -- agents -- datasetio -- eval -- inference -- safety -- scoring -- telemetry -- tool_runtime -- vector_io -providers: - inference: - - provider_id: openai - provider_type: remote::openai - config: - api_key: ${env.OPENAI_API_KEY:=} - base_url: ${env.OPENAI_BASE_URL:=https://api.openai.com/v1} - - provider_id: anthropic - provider_type: remote::anthropic - config: - api_key: ${env.ANTHROPIC_API_KEY:=} - - provider_id: gemini - provider_type: remote::gemini - config: - api_key: ${env.GEMINI_API_KEY:=} - - provider_id: groq - provider_type: remote::groq - config: - url: https://api.groq.com - api_key: ${env.GROQ_API_KEY:=} - - provider_id: together - provider_type: remote::together - config: - url: https://api.together.xyz/v1 - api_key: ${env.TOGETHER_API_KEY:=} - vector_io: - - provider_id: sqlite-vec - provider_type: inline::sqlite-vec - config: - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/sqlite_vec.db - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/sqlite_vec_registry.db - - provider_id: ${env.ENABLE_CHROMADB:+chromadb} - provider_type: remote::chromadb - config: - url: ${env.CHROMADB_URL:=} - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/chroma_remote_registry.db - - provider_id: ${env.ENABLE_PGVECTOR:+pgvector} - provider_type: remote::pgvector - config: - host: ${env.PGVECTOR_HOST:=localhost} - port: ${env.PGVECTOR_PORT:=5432} - db: ${env.PGVECTOR_DB:=} - user: ${env.PGVECTOR_USER:=} - password: ${env.PGVECTOR_PASSWORD:=} - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/pgvector_registry.db - safety: - - provider_id: llama-guard - provider_type: 
inline::llama-guard - config: - excluded_categories: [] - agents: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - persistence_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/agents_store.db - responses_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/responses_store.db - telemetry: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - service_name: "${env.OTEL_SERVICE_NAME:=\u200B}" - sinks: ${env.TELEMETRY_SINKS:=sqlite} - sqlite_db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/trace_store.db - otel_exporter_otlp_endpoint: ${env.OTEL_EXPORTER_OTLP_ENDPOINT:=} - eval: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/meta_reference_eval.db - datasetio: - - provider_id: huggingface - provider_type: remote::huggingface - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/huggingface_datasetio.db - - provider_id: localfs - provider_type: inline::localfs - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/localfs_datasetio.db - scoring: - - provider_id: basic - provider_type: inline::basic - - provider_id: llm-as-judge - provider_type: inline::llm-as-judge - - provider_id: braintrust - provider_type: inline::braintrust - config: - openai_api_key: ${env.OPENAI_API_KEY:=} - tool_runtime: - - provider_id: brave-search - provider_type: remote::brave-search - config: - api_key: ${env.BRAVE_SEARCH_API_KEY:=} - max_results: 3 - - provider_id: tavily-search - provider_type: remote::tavily-search - config: - api_key: ${env.TAVILY_SEARCH_API_KEY:=} - max_results: 3 - - provider_id: rag-runtime - provider_type: inline::rag-runtime - - provider_id: model-context-protocol - provider_type: remote::model-context-protocol -metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/registry.db -inference_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/open-benchmark}/inference_store.db -models: -- metadata: {} - model_id: gpt-4o - provider_id: openai - provider_model_id: gpt-4o - model_type: llm -- metadata: {} - model_id: claude-3-5-sonnet-latest - provider_id: anthropic - provider_model_id: claude-3-5-sonnet-latest - model_type: llm -- metadata: {} - model_id: gemini/gemini-1.5-flash - provider_id: gemini - provider_model_id: gemini/gemini-1.5-flash - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-3.3-70B-Instruct - provider_id: groq - provider_model_id: groq/llama-3.3-70b-versatile - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-3.1-405B-Instruct - provider_id: together - provider_model_id: meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo - model_type: llm -shields: -- shield_id: meta-llama/Llama-Guard-3-8B -vector_dbs: [] -datasets: -- purpose: eval/messages-answer - source: - type: uri - uri: huggingface://datasets/llamastack/simpleqa?split=train - metadata: {} - dataset_id: simpleqa -- purpose: eval/messages-answer - source: - type: uri - uri: huggingface://datasets/llamastack/mmlu_cot?split=test&name=all - metadata: {} - dataset_id: mmlu_cot -- purpose: eval/messages-answer - source: - type: uri - uri: huggingface://datasets/llamastack/gpqa_0shot_cot?split=test&name=gpqa_main - 
metadata: {} - dataset_id: gpqa_cot -- purpose: eval/messages-answer - source: - type: uri - uri: huggingface://datasets/llamastack/math_500?split=test - metadata: {} - dataset_id: math_500 -- purpose: eval/messages-answer - source: - type: uri - uri: huggingface://datasets/llamastack/IfEval?split=train - metadata: {} - dataset_id: ifeval -- purpose: eval/messages-answer - source: - type: uri - uri: huggingface://datasets/llamastack/docvqa?split=val - metadata: {} - dataset_id: docvqa -scoring_fns: [] -benchmarks: -- dataset_id: simpleqa - scoring_functions: - - llm-as-judge::405b-simpleqa - metadata: {} - benchmark_id: meta-reference-simpleqa -- dataset_id: mmlu_cot - scoring_functions: - - basic::regex_parser_multiple_choice_answer - metadata: {} - benchmark_id: meta-reference-mmlu-cot -- dataset_id: gpqa_cot - scoring_functions: - - basic::regex_parser_multiple_choice_answer - metadata: {} - benchmark_id: meta-reference-gpqa-cot -- dataset_id: math_500 - scoring_functions: - - basic::regex_parser_math_response - metadata: {} - benchmark_id: meta-reference-math-500 -- dataset_id: ifeval - scoring_functions: - - basic::ifeval - metadata: {} - benchmark_id: meta-reference-ifeval -- dataset_id: docvqa - scoring_functions: - - basic::docvqa - metadata: {} - benchmark_id: meta-reference-docvqa -tool_groups: -- toolgroup_id: builtin::websearch - provider_id: tavily-search -- toolgroup_id: builtin::rag - provider_id: rag-runtime -server: - port: 8321 diff --git a/llama_stack/distributions/starter-gpu/run.yaml.bak b/llama_stack/distributions/starter-gpu/run.yaml.bak deleted file mode 100644 index de5fe5681..000000000 --- a/llama_stack/distributions/starter-gpu/run.yaml.bak +++ /dev/null @@ -1,248 +0,0 @@ -version: 2 -image_name: starter-gpu -apis: -- agents -- batches -- datasetio -- eval -- files -- inference -- post_training -- safety -- scoring -- telemetry -- tool_runtime -- vector_io -providers: - inference: - - provider_id: ${env.CEREBRAS_API_KEY:+cerebras} - provider_type: remote::cerebras - config: - base_url: https://api.cerebras.ai - api_key: ${env.CEREBRAS_API_KEY:=} - - provider_id: ${env.OLLAMA_URL:+ollama} - provider_type: remote::ollama - config: - url: ${env.OLLAMA_URL:=http://localhost:11434} - - provider_id: ${env.VLLM_URL:+vllm} - provider_type: remote::vllm - config: - url: ${env.VLLM_URL:=} - max_tokens: ${env.VLLM_MAX_TOKENS:=4096} - api_token: ${env.VLLM_API_TOKEN:=fake} - tls_verify: ${env.VLLM_TLS_VERIFY:=true} - - provider_id: ${env.TGI_URL:+tgi} - provider_type: remote::tgi - config: - url: ${env.TGI_URL:=} - - provider_id: fireworks - provider_type: remote::fireworks - config: - url: https://api.fireworks.ai/inference/v1 - api_key: ${env.FIREWORKS_API_KEY:=} - - provider_id: together - provider_type: remote::together - config: - url: https://api.together.xyz/v1 - api_key: ${env.TOGETHER_API_KEY:=} - - provider_id: bedrock - provider_type: remote::bedrock - - provider_id: ${env.NVIDIA_API_KEY:+nvidia} - provider_type: remote::nvidia - config: - url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} - api_key: ${env.NVIDIA_API_KEY:=} - append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} - - provider_id: openai - provider_type: remote::openai - config: - api_key: ${env.OPENAI_API_KEY:=} - base_url: ${env.OPENAI_BASE_URL:=https://api.openai.com/v1} - - provider_id: anthropic - provider_type: remote::anthropic - config: - api_key: ${env.ANTHROPIC_API_KEY:=} - - provider_id: gemini - provider_type: remote::gemini - config: - api_key: ${env.GEMINI_API_KEY:=} - 
- provider_id: ${env.VERTEX_AI_PROJECT:+vertexai} - provider_type: remote::vertexai - config: - project: ${env.VERTEX_AI_PROJECT:=} - location: ${env.VERTEX_AI_LOCATION:=us-central1} - - provider_id: groq - provider_type: remote::groq - config: - url: https://api.groq.com - api_key: ${env.GROQ_API_KEY:=} - - provider_id: sambanova - provider_type: remote::sambanova - config: - url: https://api.sambanova.ai/v1 - api_key: ${env.SAMBANOVA_API_KEY:=} - - provider_id: ${env.AZURE_API_KEY:+azure} - provider_type: remote::azure - config: - api_key: ${env.AZURE_API_KEY:=} - api_base: ${env.AZURE_API_BASE:=} - api_version: ${env.AZURE_API_VERSION:=} - api_type: ${env.AZURE_API_TYPE:=} - - provider_id: sentence-transformers - provider_type: inline::sentence-transformers - vector_io: - - provider_id: faiss - provider_type: inline::faiss - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/faiss_store.db - - provider_id: sqlite-vec - provider_type: inline::sqlite-vec - config: - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/sqlite_vec.db - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/sqlite_vec_registry.db - - provider_id: ${env.MILVUS_URL:+milvus} - provider_type: inline::milvus - config: - db_path: ${env.MILVUS_DB_PATH:=~/.llama/distributions/starter-gpu}/milvus.db - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/milvus_registry.db - - provider_id: ${env.CHROMADB_URL:+chromadb} - provider_type: remote::chromadb - config: - url: ${env.CHROMADB_URL:=} - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu/}/chroma_remote_registry.db - - provider_id: ${env.PGVECTOR_DB:+pgvector} - provider_type: remote::pgvector - config: - host: ${env.PGVECTOR_HOST:=localhost} - port: ${env.PGVECTOR_PORT:=5432} - db: ${env.PGVECTOR_DB:=} - user: ${env.PGVECTOR_USER:=} - password: ${env.PGVECTOR_PASSWORD:=} - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/pgvector_registry.db - files: - - provider_id: meta-reference-files - provider_type: inline::localfs - config: - storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter-gpu/files} - metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/files_metadata.db - safety: - - provider_id: llama-guard - provider_type: inline::llama-guard - config: - excluded_categories: [] - - provider_id: code-scanner - provider_type: inline::code-scanner - agents: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - persistence_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/agents_store.db - responses_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/responses_store.db - telemetry: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - service_name: "${env.OTEL_SERVICE_NAME:=\u200B}" - sinks: ${env.TELEMETRY_SINKS:=sqlite} - sqlite_db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/trace_store.db - otel_exporter_otlp_endpoint: ${env.OTEL_EXPORTER_OTLP_ENDPOINT:=} - post_training: - - provider_id: huggingface-gpu - provider_type: inline::huggingface-gpu - config: - checkpoint_format: huggingface - distributed_backend: null - device: cpu - dpo_output_dir: ~/.llama/distributions/starter-gpu/dpo_output - eval: - - 
provider_id: meta-reference - provider_type: inline::meta-reference - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/meta_reference_eval.db - datasetio: - - provider_id: huggingface - provider_type: remote::huggingface - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/huggingface_datasetio.db - - provider_id: localfs - provider_type: inline::localfs - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/localfs_datasetio.db - scoring: - - provider_id: basic - provider_type: inline::basic - - provider_id: llm-as-judge - provider_type: inline::llm-as-judge - - provider_id: braintrust - provider_type: inline::braintrust - config: - openai_api_key: ${env.OPENAI_API_KEY:=} - tool_runtime: - - provider_id: brave-search - provider_type: remote::brave-search - config: - api_key: ${env.BRAVE_SEARCH_API_KEY:=} - max_results: 3 - - provider_id: tavily-search - provider_type: remote::tavily-search - config: - api_key: ${env.TAVILY_SEARCH_API_KEY:=} - max_results: 3 - - provider_id: rag-runtime - provider_type: inline::rag-runtime - - provider_id: model-context-protocol - provider_type: remote::model-context-protocol - batches: - - provider_id: reference - provider_type: inline::reference - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/batches.db -metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/registry.db -inference_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter-gpu}/inference_store.db -models: [] -shields: -- shield_id: llama-guard - provider_id: ${env.SAFETY_MODEL:+llama-guard} - provider_shield_id: ${env.SAFETY_MODEL:=} -- shield_id: code-scanner - provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner} - provider_shield_id: ${env.CODE_SCANNER_MODEL:=} -vector_dbs: [] -datasets: [] -scoring_fns: [] -benchmarks: [] -tool_groups: -- toolgroup_id: builtin::websearch - provider_id: tavily-search -- toolgroup_id: builtin::rag - provider_id: rag-runtime -server: - port: 8321 diff --git a/llama_stack/distributions/watsonx/run.yaml.bak b/llama_stack/distributions/watsonx/run.yaml.bak deleted file mode 100644 index 92f367910..000000000 --- a/llama_stack/distributions/watsonx/run.yaml.bak +++ /dev/null @@ -1,219 +0,0 @@ -version: 2 -image_name: watsonx -apis: -- agents -- datasetio -- eval -- inference -- safety -- scoring -- telemetry -- tool_runtime -- vector_io -- files -providers: - inference: - - provider_id: watsonx - provider_type: remote::watsonx - config: - url: ${env.WATSONX_BASE_URL:=https://us-south.ml.cloud.ibm.com} - api_key: ${env.WATSONX_API_KEY:=} - project_id: ${env.WATSONX_PROJECT_ID:=} - - provider_id: sentence-transformers - provider_type: inline::sentence-transformers - vector_io: - - provider_id: faiss - provider_type: inline::faiss - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/faiss_store.db - safety: - - provider_id: llama-guard - provider_type: inline::llama-guard - config: - excluded_categories: [] - agents: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - persistence_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/agents_store.db - responses_store: - type: sqlite - db_path: 
${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/responses_store.db - telemetry: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - service_name: "${env.OTEL_SERVICE_NAME:=\u200B}" - sinks: ${env.TELEMETRY_SINKS:=console,sqlite} - sqlite_db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/trace_store.db - otel_exporter_otlp_endpoint: ${env.OTEL_EXPORTER_OTLP_ENDPOINT:=} - eval: - - provider_id: meta-reference - provider_type: inline::meta-reference - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/meta_reference_eval.db - datasetio: - - provider_id: huggingface - provider_type: remote::huggingface - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/huggingface_datasetio.db - - provider_id: localfs - provider_type: inline::localfs - config: - kvstore: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/localfs_datasetio.db - scoring: - - provider_id: basic - provider_type: inline::basic - - provider_id: llm-as-judge - provider_type: inline::llm-as-judge - - provider_id: braintrust - provider_type: inline::braintrust - config: - openai_api_key: ${env.OPENAI_API_KEY:=} - tool_runtime: - - provider_id: brave-search - provider_type: remote::brave-search - config: - api_key: ${env.BRAVE_SEARCH_API_KEY:=} - max_results: 3 - - provider_id: tavily-search - provider_type: remote::tavily-search - config: - api_key: ${env.TAVILY_SEARCH_API_KEY:=} - max_results: 3 - - provider_id: rag-runtime - provider_type: inline::rag-runtime - - provider_id: model-context-protocol - provider_type: remote::model-context-protocol - files: - - provider_id: meta-reference-files - provider_type: inline::localfs - config: - storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/watsonx/files} - metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/files_metadata.db -metadata_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/registry.db -inference_store: - type: sqlite - db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/inference_store.db -models: -- metadata: {} - model_id: meta-llama/llama-3-3-70b-instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-3-70b-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-3.3-70B-Instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-3-70b-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/llama-2-13b-chat - provider_id: watsonx - provider_model_id: meta-llama/llama-2-13b-chat - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-2-13b - provider_id: watsonx - provider_model_id: meta-llama/llama-2-13b-chat - model_type: llm -- metadata: {} - model_id: meta-llama/llama-3-1-70b-instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-1-70b-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-3.1-70B-Instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-1-70b-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/llama-3-1-8b-instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-1-8b-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-3.1-8B-Instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-1-8b-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/llama-3-2-11b-vision-instruct - provider_id: 
watsonx - provider_model_id: meta-llama/llama-3-2-11b-vision-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-3.2-11B-Vision-Instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-2-11b-vision-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/llama-3-2-1b-instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-2-1b-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-3.2-1B-Instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-2-1b-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/llama-3-2-3b-instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-2-3b-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-3.2-3B-Instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-2-3b-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/llama-3-2-90b-vision-instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-2-90b-vision-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-3.2-90B-Vision-Instruct - provider_id: watsonx - provider_model_id: meta-llama/llama-3-2-90b-vision-instruct - model_type: llm -- metadata: {} - model_id: meta-llama/llama-guard-3-11b-vision - provider_id: watsonx - provider_model_id: meta-llama/llama-guard-3-11b-vision - model_type: llm -- metadata: {} - model_id: meta-llama/Llama-Guard-3-11B-Vision - provider_id: watsonx - provider_model_id: meta-llama/llama-guard-3-11b-vision - model_type: llm -- metadata: - embedding_dimension: 384 - model_id: all-MiniLM-L6-v2 - provider_id: sentence-transformers - model_type: embedding -shields: [] -vector_dbs: [] -datasets: [] -scoring_fns: [] -benchmarks: [] -tool_groups: -- toolgroup_id: builtin::websearch - provider_id: tavily-search -- toolgroup_id: builtin::rag - provider_id: rag-runtime -server: - port: 8321 diff --git a/practice/check_test_cases.py b/practice/check_test_cases.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/practice/coin_change_practice.py b/practice/coin_change_practice.py deleted file mode 100644 index c2465c676..000000000 --- a/practice/coin_change_practice.py +++ /dev/null @@ -1,184 +0,0 @@ -""" -Coin Change - Interview Practice Problem - -Problem: You are given an integer array coins representing coins of different denominations -and an integer amount representing a total amount of money. - -Return the fewest number of coins that you need to make up that amount. -If that amount of money cannot be made up by any combination of the coins, return -1. - -You may assume that you have an infinite number of each kind of coin. - -Example 1: -Input: coins = [1,3,4], amount = 6 -Output: 2 -Explanation: 6 = 3 + 3 - -Example 2: -Input: coins = [2], amount = 3 -Output: -1 - -Example 3: -Input: coins = [1], amount = 0 -Output: 0 - -Constraints: -- 1 <= coins.length <= 12 -- 1 <= coins[i] <= 2^31 - 1 -- 0 <= amount <= 10^4 -""" - - -def coinChange(coins, amount): - """ - Find the minimum number of coins needed to make the given amount. - - Args: - coins: List[int] - denominations of coins available - amount: int - target amount to make - - Returns: - int - minimum number of coins needed, or -1 if impossible - - Time Complexity Goal: O(amount * len(coins)) - Space Complexity Goal: O(amount) - - Hint: This is a classic Dynamic Programming problem! - Think about: What's the minimum coins needed for each amount from 0 to target? 
- """ - min_coins = [-1 for _ in range(amount) + 1] - min_coins[0] = 0 - for cur_amount in range(amount) + 1: - num = [amount + 1 for _ in coins] - for i, coin in enumerate(coins): - hist = cur_amount - coin - if hist >= 0 and min_coins[hist] != -1 - num[i] = min_coins[] - - - -def test_coin_change(): - """Comprehensive test cases for coinChange function""" - - print("=" * 60) - print("COIN CHANGE - TEST CASES") - print("=" * 60) - - # Test Case 1: Basic example from problem description - print("\nTest Case 1: Basic Example") - coins1 = [1, 3, 4] - amount1 = 6 - result1 = coinChange(coins1, amount1) - expected1 = 2 # 3 + 3 - print(f"Coins: {coins1}, Amount: {amount1}") - print(f"Result: {result1}, Expected: {expected1}") - assert result1 == expected1, f"Test 1 failed: got {result1}, expected {expected1}" - print("✅ Test 1 PASSED") - - # Test Case 2: Impossible case - print("\nTest Case 2: Impossible Case") - coins2 = [2] - amount2 = 3 - result2 = coinChange(coins2, amount2) - expected2 = -1 - print(f"Coins: {coins2}, Amount: {amount2}") - print(f"Result: {result2}, Expected: {expected2}") - assert result2 == expected2, f"Test 2 failed: got {result2}, expected {expected2}" - print("✅ Test 2 PASSED") - - # Test Case 3: Zero amount - print("\nTest Case 3: Zero Amount") - coins3 = [1] - amount3 = 0 - result3 = coinChange(coins3, amount3) - expected3 = 0 - print(f"Coins: {coins3}, Amount: {amount3}") - print(f"Result: {result3}, Expected: {expected3}") - assert result3 == expected3, f"Test 3 failed: got {result3}, expected {expected3}" - print("✅ Test 3 PASSED") - - # Test Case 4: Single coin solution - print("\nTest Case 4: Single Coin Solution") - coins4 = [1, 3, 4] - amount4 = 4 - result4 = coinChange(coins4, amount4) - expected4 = 1 # Just use coin 4 - print(f"Coins: {coins4}, Amount: {amount4}") - print(f"Result: {result4}, Expected: {expected4}") - assert result4 == expected4, f"Test 4 failed: got {result4}, expected {expected4}" - print("✅ Test 4 PASSED") - - # Test Case 5: Greedy fails, DP succeeds - print("\nTest Case 5: Greedy Algorithm Fails") - coins5 = [1, 3, 4] - amount5 = 6 - result5 = coinChange(coins5, amount5) - expected5 = 2 # 3 + 3, not 4 + 1 + 1 (greedy would give 3) - print(f"Coins: {coins5}, Amount: {amount5}") - print(f"Result: {result5}, Expected: {expected5}") - print("Note: Greedy (largest first) would give 4+1+1=3 coins, but optimal is 3+3=2 coins") - assert result5 == expected5, f"Test 5 failed: got {result5}, expected {expected5}" - print("✅ Test 5 PASSED") - - # Test Case 6: Large amount (performance test) - print("\nTest Case 6: Large Amount (Performance Test)") - coins6 = [1, 5, 10, 25] - amount6 = 67 - result6 = coinChange(coins6, amount6) - expected6 = 5 # 25 + 25 + 10 + 5 + 1 + 1 = 67 - print(f"Coins: {coins6}, Amount: {amount6}") - print(f"Result: {result6}, Expected: {expected6}") - assert result6 == expected6, f"Test 6 failed: got {result6}, expected {expected6}" - print("✅ Test 6 PASSED") - - # Test Case 7: All ones (edge case) - print("\nTest Case 7: Only Coin Value 1") - coins7 = [1] - amount7 = 5 - result7 = coinChange(coins7, amount7) - expected7 = 5 # 1 + 1 + 1 + 1 + 1 - print(f"Coins: {coins7}, Amount: {amount7}") - print(f"Result: {result7}, Expected: {expected7}") - assert result7 == expected7, f"Test 7 failed: got {result7}, expected {expected7}" - print("✅ Test 7 PASSED") - - # Test Case 8: Complex case with many coins - print("\nTest Case 8: Complex Case") - coins8 = [2, 3, 5] - amount8 = 9 - result8 = coinChange(coins8, amount8) - expected8 = 
3 # 3 + 3 + 3 - print(f"Coins: {coins8}, Amount: {amount8}") - print(f"Result: {result8}, Expected: {expected8}") - assert result8 == expected8, f"Test 8 failed: got {result8}, expected {expected8}" - print("✅ Test 8 PASSED") - - # Test Case 9: Impossible with multiple coins - print("\nTest Case 9: Impossible with Multiple Coins") - coins9 = [3, 5] - amount9 = 1 - result9 = coinChange(coins9, amount9) - expected9 = -1 - print(f"Coins: {coins9}, Amount: {amount9}") - print(f"Result: {result9}, Expected: {expected9}") - assert result9 == expected9, f"Test 9 failed: got {result9}, expected {expected9}" - print("✅ Test 9 PASSED") - - # Test Case 10: Large performance test - print("\nTest Case 10: Large Performance Test") - coins10 = [1, 2, 5, 10, 20, 50] - amount10 = 1000 - result10 = coinChange(coins10, amount10) - expected10 = 20 # 20 coins of 50 each - print(f"Coins: {coins10}, Amount: {amount10}") - print(f"Result: {result10}, Expected: {expected10}") - assert result10 == expected10, f"Test 10 failed: got {result10}, expected {expected10}" - print("✅ Test 10 PASSED") - - print("\n" + "=" * 60) - print("🎉 ALL TESTS PASSED! Great job!") - print("=" * 60) - - -if __name__ == "__main__": - test_coin_change() diff --git a/practice/hangman_guesser.py b/practice/hangman_guesser.py deleted file mode 100644 index 55d3e8ad9..000000000 --- a/practice/hangman_guesser.py +++ /dev/null @@ -1,243 +0,0 @@ -from collections import defaultdict - - -def guess_next_character(mystery_word_pattern, guessed_characters, word_pool): - """ - Returns: - str: Single character that is most likely to be in the mystery word, - or None if no good guess can be made - """ - num_chars = len(mystery_word_pattern) - correct_positions = {i: c for i, c in enumerate(mystery_word_pattern) if c != "_"} - char_counts_by_position = [defaultdict(int) for _ in range(num_chars)] - - for word in word_pool: - wordlen = len(word) - matches = True - for i, c in correct_positions.items(): - if i < wordlen and word[i] != c: - matches = False - break - if not matches: - continue - - for j, c in enumerate(word): - if j >= num_chars: - continue - if c in guessed_characters: - continue - if mystery_word_pattern[j] != "_": - continue - char_counts_by_position[j][c] += 1 - - max_count = 0 - char = None - for counts in char_counts_by_position: - for c, v in counts.items(): - if v > max_count: - max_count = v - char = c - return char - - -# Test cases -def test_guess_next_character(): - """Test cases for the hangman character guesser""" - - # Test case 1: Basic case with clear winner - pattern = "_at" - guessed = ["a", "t"] - pool = ["cat", "bat", "hat", "rat", "mat"] - result = guess_next_character(pattern, guessed, pool) - # Any of 'c', 'b', 'h', 'r', 'm' would be valid since they all appear once - assert result in ["c", "b", "h", "r", "m"], f"Expected one of 'c','b','h','r','m', got {result}" - - # Test case 2: Some characters already guessed - pattern = "_at" - guessed = ["a", "t", "c", "b"] - pool = ["cat", "bat", "hat", "rat", "mat"] - result = guess_next_character(pattern, guessed, pool) - assert result in ["h", "r", "m"], f"Expected one of 'h','r','m', got {result}" - - # Test case 3: Multiple missing positions - pattern = "_a_e" - guessed = ["a", "e"] - pool = ["cake", "bake", "lake", "make", "take", "wake", "came", "name", "game", "same"] - result = guess_next_character(pattern, guessed, pool) - # Should return most frequent character in missing positions - assert isinstance(result, str) and len(result) == 1, f"Expected single character, 
got {result}" - - # Test case 4: Word pool doesn't match pattern (should filter) - pattern = "_at" - guessed = ["a", "t"] - pool = ["cat", "dog", "bat", "rat", "hat"] # "dog" doesn't match the pattern - result = guess_next_character(pattern, guessed, pool) - assert result in ["c", "b", "r", "h"], f"Expected one of 'c','b','r','h', got {result}" - - # Test case 5: All characters in matching words already guessed - pattern = "_at" - guessed = ["a", "t", "c", "b", "h", "r", "m"] - pool = ["cat", "bat", "hat", "rat", "mat"] - result = guess_next_character(pattern, guessed, pool) - assert result is None, f"Expected None when all characters guessed, got {result}" - - # Test case 6: Single character missing - pattern = "c_t" - guessed = ["c", "t"] - pool = ["cat", "cot", "cut", "cit"] - result = guess_next_character(pattern, guessed, pool) - # Should return most frequent vowel/character in position 1 - assert isinstance(result, str) and len(result) == 1, f"Expected single character, got {result}" - - # Test case 7: Empty word pool - pattern = "_at" - guessed = ["a", "t"] - pool = [] - result = guess_next_character(pattern, guessed, pool) - assert result is None, f"Expected None for empty pool, got {result}" - - # Test case 8: No matching words in pool - pattern = "_at" - guessed = ["a", "t"] - pool = ["dog", "run", "sun"] - result = guess_next_character(pattern, guessed, pool) - assert result is None, f"Expected None when no words match pattern, got {result}" - - # Test case 9: Longer word with multiple gaps - pattern = "_o_er" - guessed = ["o", "e", "r"] - pool = ["power", "tower", "lower", "cover", "hover", "mower", "boxer", "poker"] - result = guess_next_character(pattern, guessed, pool) - assert isinstance(result, str) and len(result) == 1, f"Expected single character, got {result}" - - # Test case 10: Case sensitivity - pattern = "_at" - guessed = ["a", "t"] - pool = ["Cat", "BAT", "hat"] # Mixed case - result = guess_next_character(pattern, guessed, pool) - # Should handle case appropriately - assert result is not None, f"Expected a character, got {result}" - - # Test case 11: Tie-breaking - multiple characters with same frequency - pattern = "_a_" - guessed = ["a"] - pool = ["cat", "bat", "had", "bag"] # c,b,h,g all appear once in pos 0; t,t,d,g in pos 2 - result = guess_next_character(pattern, guessed, pool) - # Should return one of the valid characters (implementation dependent) - assert result is not None and result not in guessed, f"Expected unguessed character, got {result}" - - # Test case 12: Complex pattern with repeated characters in word - pattern = "_oo_" - guessed = ["o"] - pool = ["book", "look", "took", "cook", "hook", "noon", "boom", "doom"] - result = guess_next_character(pattern, guessed, pool) - assert result is not None and result != "o", f"Expected character other than 'o', got {result}" - - # Test case 13: Very long word with sparse information - pattern = "___e____i__" - guessed = ["e", "i"] - pool = ["programming", "engineering", "mathematics", "development"] - result = guess_next_character(pattern, guessed, pool) - # Only "programming" and "engineering" match the pattern - assert result is not None, f"Expected a character, got {result}" - - # Test case 14: Word with all same length but different patterns - pattern = "_a__a" - guessed = ["a"] - pool = ["mamma", "drama", "llama", "karma", "panda"] # "panda" doesn't match - result = guess_next_character(pattern, guessed, pool) - # Should only consider mamma, drama, llama, karma - assert result is not None and result != 
"a", f"Expected character other than 'a', got {result}" - - # Test case 15: Single letter word - pattern = "_" - guessed = [] - pool = ["a", "I", "o"] - result = guess_next_character(pattern, guessed, pool) - assert result in ["a", "I", "o"], f"Expected one of 'a','I','o', got {result}" - - # Test case 16: Almost complete word - only one missing - pattern = "almos_" - guessed = ["a", "l", "m", "o", "s"] - pool = ["almost", "almond"] # "almond" doesn't match pattern - result = guess_next_character(pattern, guessed, pool) - assert result == "t", f"Expected 't', got {result}" - - # Test case 17: Frequency analysis with position weighting - pattern = "_e__e_" - guessed = ["e"] - pool = ["better", "letter", "pepper", "keeper", "helper", "member"] - result = guess_next_character(pattern, guessed, pool) - # Should find most frequent character across all missing positions - assert result is not None and result != "e", f"Expected character other than 'e', got {result}" - - # Test case 18: Words with different lengths in pool (should filter by length) - pattern = "___" - guessed = [] - pool = ["cat", "dog", "fox", "car", "bar", "bat", "run"] - result = guess_next_character(pattern, guessed, pool) - # Should only consider 3-letter words: cat, dog, fox, car, bar, bat, run - assert result is not None, f"Expected a character, got {result}" - - # Test case 19: All positions filled except one, but multiple valid completions - pattern = "c_r" - guessed = ["c", "r"] - pool = ["car", "cor", "cur", "cir"] # All valid completions - result = guess_next_character(pattern, guessed, pool) - assert result in ["a", "o", "u", "i"], f"Expected one of 'a','o','u','i', got {result}" - - # Test case 20: Pattern with numbers/special chars (edge case) - pattern = "_a_" - guessed = ["a"] - pool = ["1a2", "3a4", "cat", "bat"] # Mix of alphanumeric and letters - result = guess_next_character(pattern, guessed, pool) - # Should handle all valid characters - assert result is not None, f"Expected a character, got {result}" - - # Test case 21: Very large frequency difference - pattern = "_a_" - guessed = ["a"] - pool = ["cat"] * 100 + ["bat", "hat", "rat"] # 'c' appears 100 times, others once each - result = guess_next_character(pattern, guessed, pool) - assert result == "c", f"Expected 'c' due to high frequency, got {result}" - - # Test case 22: Pattern where some positions have no valid characters - pattern = "_x_" - guessed = [ - "x", - "a", - "b", - "c", - "d", - "e", - "f", - "g", - "h", - "i", - "j", - "k", - "l", - "m", - "n", - "o", - "p", - "q", - "r", - "s", - "t", - "u", - "v", - "w", - "y", - "z", - ] - pool = ["axe", "fox", "box", "six"] - result = guess_next_character(pattern, guessed, pool) - # All common letters guessed, should return None or a less common letter - assert result is None or result not in guessed, f"Unexpected result: {result}" - - print("All test cases passed!") - - -if __name__ == "__main__": - test_guess_next_character() diff --git a/practice/rotate_image_practice.py b/practice/rotate_image_practice.py deleted file mode 100644 index 7d7ef48eb..000000000 --- a/practice/rotate_image_practice.py +++ /dev/null @@ -1,140 +0,0 @@ -""" -Rotate Image (Matrix) - Interview Practice Problem - -Problem: Given an n x n 2D matrix representing an image, rotate it by 90 degrees clockwise in-place. -You have to rotate the image in-place, which means you have to modify the input 2D matrix directly. -DO NOT allocate another 2D matrix and do the rotation. 
- -Example: -Input: [[1,2,3], - [4,5,6], - [7,8,9]] - -Output: [[7,4,1], - [8,5,2], - [9,6,3]] - -Constraints: -- n == matrix.length == matrix[i].length -- 1 <= n <= 20 -- -1000 <= matrix[i][j] <= 1000 -""" - - -def rotate(matrix): - """ - Rotate the matrix 90 degrees clockwise in-place. - - Args: - matrix: List[List[int]] - n x n 2D matrix - - Returns: - None - Do not return anything, modify matrix in-place instead. - - Time Complexity Goal: O(n^2) - Space Complexity Goal: O(1) - """ - a = matrix - n = len(a[0]) - - # first swap against one diagonal so a(i, j) -> a(n - j - 1, n - i - 1) - for i in range(0, n): - for j in range(0, n - i): - t = a[i][j] - a[i][j] = a[n - j - 1][n - i - 1] - a[n - j - 1][n - i - 1] = t - - # now flip across horizontal line - for i in range(0, n // 2): - for j in range(0, n): - t = a[i][j] - a[i][j] = a[n - i - 1][j] - a[n - i - 1][j] = t - - -def print_matrix(matrix, title="Matrix"): - """Helper function to print matrix in a readable format""" - print(f"\n{title}:") - for row in matrix: - print(row) - - -def test_rotate(): - """Comprehensive test cases for rotate function""" - - print("=" * 60) - print("ROTATE IMAGE - TEST CASES") - print("=" * 60) - - # Test Case 1: 3x3 matrix (basic example) - print("\nTest Case 1: 3x3 Matrix") - matrix1 = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] - expected1 = [[7, 4, 1], [8, 5, 2], [9, 6, 3]] - print_matrix(matrix1, "Before rotation") - rotate(matrix1) - print_matrix(matrix1, "After rotation") - print_matrix(expected1, "Expected") - assert matrix1 == expected1, f"Test 1 failed: got {matrix1}, expected {expected1}" - print("✅ Test 1 PASSED") - - # Test Case 2: 1x1 matrix (edge case) - print("\nTest Case 2: 1x1 Matrix (Edge Case)") - matrix2 = [[42]] - expected2 = [[42]] - print_matrix(matrix2, "Before rotation") - rotate(matrix2) - print_matrix(matrix2, "After rotation") - assert matrix2 == expected2, f"Test 2 failed: got {matrix2}, expected {expected2}" - print("✅ Test 2 PASSED") - - # Test Case 3: 2x2 matrix - print("\nTest Case 3: 2x2 Matrix") - matrix3 = [[1, 2], [3, 4]] - expected3 = [[3, 1], [4, 2]] - print_matrix(matrix3, "Before rotation") - rotate(matrix3) - print_matrix(matrix3, "After rotation") - print_matrix(expected3, "Expected") - assert matrix3 == expected3, f"Test 3 failed: got {matrix3}, expected {expected3}" - print("✅ Test 3 PASSED") - - # Test Case 4: 4x4 matrix (larger matrix) - print("\nTest Case 4: 4x4 Matrix") - matrix4 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]] - expected4 = [[13, 9, 5, 1], [14, 10, 6, 2], [15, 11, 7, 3], [16, 12, 8, 4]] - print_matrix(matrix4, "Before rotation") - rotate(matrix4) - print_matrix(matrix4, "After rotation") - print_matrix(expected4, "Expected") - assert matrix4 == expected4, f"Test 4 failed: got {matrix4}, expected {expected4}" - print("✅ Test 4 PASSED") - - # Test Case 5: Matrix with negative numbers - print("\nTest Case 5: Matrix with Negative Numbers") - matrix5 = [[-1, -2, -3], [-4, -5, -6], [-7, -8, -9]] - expected5 = [[-7, -4, -1], [-8, -5, -2], [-9, -6, -3]] - print_matrix(matrix5, "Before rotation") - rotate(matrix5) - print_matrix(matrix5, "After rotation") - print_matrix(expected5, "Expected") - assert matrix5 == expected5, f"Test 5 failed: got {matrix5}, expected {expected5}" - print("✅ Test 5 PASSED") - - # Test Case 6: 5x5 matrix (odd dimension, complexity test) - print("\nTest Case 6: 5x5 Matrix (Complexity Test)") - matrix6 = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25]] - 
expected6 = [[21, 16, 11, 6, 1], [22, 17, 12, 7, 2], [23, 18, 13, 8, 3], [24, 19, 14, 9, 4], [25, 20, 15, 10, 5]] - print_matrix(matrix6, "Before rotation") - rotate(matrix6) - print_matrix(matrix6, "After rotation") - print_matrix(expected6, "Expected") - assert matrix6 == expected6, f"Test 6 failed: got {matrix6}, expected {expected6}" - print("✅ Test 6 PASSED") - - print("\n" + "=" * 60) - print("🎉 ALL TESTS PASSED! Great job!") - print("=" * 60) - - -if __name__ == "__main__": - test_rotate()
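
Note: the patch above only records the deletions. As a rough illustration of how stray backups like these can be enumerated before such a cleanup commit, the sketch below lists tracked *.bak files and empty Python scripts; it assumes a git checkout and Python 3.9+, is run from the repository root, and is illustrative only — it is not part of the change itself.

#!/usr/bin/env python3
"""Sketch: list tracked backup files and empty scripts ahead of a cleanup commit.

Assumes a git checkout and Python 3.9+; the patterns simply mirror the files
removed above (*.yaml.bak plus an empty practice script).
"""
import subprocess
from pathlib import Path


def tracked_files() -> list[Path]:
    # `git ls-files` prints every tracked path, one per line.
    out = subprocess.run(["git", "ls-files"], capture_output=True, text=True, check=True)
    return [Path(line) for line in out.stdout.splitlines()]


def main() -> None:
    for path in tracked_files():
        is_backup = path.suffix == ".bak"  # e.g. run.yaml.bak
        is_empty_script = path.suffix == ".py" and path.is_file() and path.stat().st_size == 0
        if is_backup or is_empty_script:
            print(path)


if __name__ == "__main__":
    main()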