# .gitmodules — git submodule configuration, read by `git submodule` commands.
# Format: git-config dialect (sections, tab-indented key = value pairs, `#` comments).
# NOTE(review): section name says "providers/impls" while path says "providers/inline";
# in .gitmodules the section name is a stable identifier (usually the submodule's
# original path), so the mismatch is expected after a directory rename — verify intended.
[submodule "llama_stack/providers/impls/ios/inference/executorch"]
	path = llama_stack/providers/inline/ios/inference/executorch
	url = https://github.com/pytorch/executorch