Mirror of https://github.com/meta-llama/llama-stack.git
Synced 2025-12-12 04:00:42 +00:00
remove setting interop threads
commit 462cdb051c
parent a46fe682dc
1 changed file with 1 addition and 5 deletions
@@ -93,11 +93,7 @@ class SentenceTransformerEmbeddingMixin:
             # PyTorch's OpenMP kernels can segfault on macOS when spawned from background
             # threads with the default parallel settings, so force a single-threaded CPU run.
             log.debug(f"Constraining torch threads on {platform_name} to a single worker")
-            try:
-                torch.set_num_threads(1)
-                torch.set_num_interop_threads(1)
-            except Exception:
-                log.debug(f"Failed to adjust torch thread counts on {platform_name}", exc_info=True)
+            torch.set_num_threads(1)
 
         return SentenceTransformer(model, trust_remote_code=True)
 
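The commit drops the torch.set_num_interop_threads(1) call along with the try/except that guarded it, leaving a bare torch.set_num_threads(1). Below is a minimal sketch of the post-commit state for context; the method name _load_model, the platform check, and the logger wiring are illustrative assumptions, while the comment, the log.debug call, the set_num_threads(1) call, and the return statement come from the diff itself. One plausible motivation, inferred from PyTorch's documented behavior rather than stated in the commit: torch.set_num_interop_threads() raises a RuntimeError once inter-op parallel work has started, whereas torch.set_num_threads() can be called at any time, so the guard becomes unnecessary once the interop call is gone.

import logging
import platform

import torch
from sentence_transformers import SentenceTransformer

log = logging.getLogger(__name__)


class SentenceTransformerEmbeddingMixin:
    # Hypothetical method shape; only the body lines marked "from the diff"
    # are taken from the commit, the rest is scaffolding for illustration.
    def _load_model(self, model: str) -> SentenceTransformer:
        platform_name = platform.system().lower()  # "darwin" on macOS
        if platform_name == "darwin":
            # From the diff: PyTorch's OpenMP kernels can segfault on macOS when
            # spawned from background threads with the default parallel settings,
            # so force a single-threaded CPU run.
            log.debug(f"Constraining torch threads on {platform_name} to a single worker")
            # set_num_threads caps intra-op parallelism and is safe to call at
            # any point; set_num_interop_threads (removed by this commit) raises
            # a RuntimeError once inter-op parallel work has already started.
            torch.set_num_threads(1)
        return SentenceTransformer(model, trust_remote_code=True)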