Fix Triton inference without explicit metadata (#16938)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
pull/16576/head^2
Olivier Jolly 1 month ago committed by GitHub
parent 1aebe6ffed
commit ccda7ff973
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 2
      ultralytics/nn/autobackend.py

@@ -126,7 +126,7 @@ class AutoBackend(nn.Module):
fp16 &= pt or jit or onnx or xml or engine or nn_module or triton # FP16
nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)
stride = 32 # default stride
-        model, metadata = None, None
+        model, metadata, task = None, None, None
# Set device
cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA

Loading…
Cancel
Save