Fix GPU detection on Apple Silicon

This commit is contained in:
Felipe Daragon
2025-05-14 02:05:34 +01:00
parent a33d858916
commit 59b6233882
3 changed files with 33 additions and 40 deletions

View File

@@ -112,56 +112,48 @@ class Refacer:
raise Exception("ERROR, something went wrong downloading the model!")
def __check_providers(self):
    """Select ONNX Runtime execution providers and tune session options.

    Queries the providers available in this onnxruntime build, orders them
    by preference (CoreML on Apple Silicon, then CUDA, then CPU), creates a
    throwaway InferenceSession against a known model file to discover which
    provider onnxruntime *actually* activates, and sets ``self.mode``,
    ``self.use_num_cpus`` and threading options accordingly.

    Reads:  self.force_cpu, self.colab_performance
    Writes: self.providers, self.sess_options, self.mode, self.use_num_cpus
    """
    available_providers = rt.get_available_providers()
    if self.force_cpu:
        self.providers = ['CPUExecutionProvider']
    else:
        # Prefer faster execution providers in order; keep only those this
        # onnxruntime build actually supports, so session creation won't
        # warn/fail on an unavailable provider.
        self.providers = []
        for provider in ['CoreMLExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider']:
            if provider in available_providers:
                self.providers.append(provider)
    rt.set_default_logger_severity(4)
    self.sess_options = rt.SessionOptions()
    self.sess_options.execution_mode = rt.ExecutionMode.ORT_PARALLEL  # Better parallelism
    self.sess_options.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_ALL
    # Requesting a provider doesn't guarantee onnxruntime uses it; create a
    # temporary session on a model we know exists to detect the one that is
    # actually active (first entry of get_providers()).
    test_model = os.path.expanduser("~/.insightface/models/buffalo_l/det_10g.onnx")
    try:
        test_session = rt.InferenceSession(test_model, self.sess_options, providers=self.providers)
        active_provider = test_session.get_providers()[0]
    except Exception as e:
        # Best-effort probe: if the model is missing or session creation
        # fails, fall back to CPU rather than crashing startup.
        print(f"[ERROR] Failed to create test session: {e}")
        active_provider = 'CPUExecutionProvider'
    # Set mode based on the provider that is actually active, not the one
    # that was merely requested.
    if active_provider == 'CUDAExecutionProvider':
        self.mode = RefacerMode.CUDA
        # GPU does the heavy lifting; keep CPU-side parallelism minimal.
        self.use_num_cpus = 2
        self.sess_options.intra_op_num_threads = 1
    elif active_provider == 'CoreMLExecutionProvider':
        self.mode = RefacerMode.COREML
        self.use_num_cpus = max(mp.cpu_count() - 1, 1)
        self.sess_options.intra_op_num_threads = int(self.use_num_cpus / 2)
    elif self.colab_performance:
        self.mode = RefacerMode.TENSORRT
        self.use_num_cpus = max(mp.cpu_count() - 1, 1)
        self.sess_options.intra_op_num_threads = int(self.use_num_cpus / 2)
    else:
        self.mode = RefacerMode.CPU
        self.use_num_cpus = max(mp.cpu_count() - 1, 1)
        self.sess_options.intra_op_num_threads = int(self.use_num_cpus / 2)
    print(f"Available providers: {available_providers}")
    print(f"Using providers: {self.providers}")
    print(f"Active provider: {active_provider}")
    print(f"Mode: {self.mode}")