diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 5ce9274ea0..8263b3dcb4 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -111,7 +111,7 @@ jobs:
           else
             pip install -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu
           fi
-          yolo export format=tflite imgsz=32
+          yolo export format=tflite imgsz=32 || true
       - name: Check environment
         run: |
           echo "RUNNER_OS is ${{ runner.os }}"
@@ -165,9 +165,9 @@ jobs:
         run: |  # CoreML must be installed before export due to protobuf error from AutoInstall
           python -m pip install --upgrade pip wheel
           if [ "${{ matrix.torch }}" == "1.8.0" ]; then
-            pip install -e . torch==1.8.0 torchvision==0.9.0 pytest "coremltools>=6.0,<=6.2" --extra-index-url https://download.pytorch.org/whl/cpu
+            pip install -e . torch==1.8.0 torchvision==0.9.0 pytest "coremltools>=7.0.b1" --extra-index-url https://download.pytorch.org/whl/cpu
           else
-            pip install -e . pytest "coremltools>=6.0,<=6.2" --extra-index-url https://download.pytorch.org/whl/cpu
+            pip install -e . pytest "coremltools>=7.0.b1" --extra-index-url https://download.pytorch.org/whl/cpu
           fi
       - name: Check environment
         run: |
diff --git a/.gitignore b/.gitignore
index b952695287..9ab57be406 100644
--- a/.gitignore
+++ b/.gitignore
@@ -147,6 +147,7 @@ weights/
 *.onnx
 *.engine
 *.mlmodel
+*.mlpackage
 *.torchscript
 *.tflite
 *.h5
diff --git a/docs/integrations/index.md b/docs/integrations/index.md
index e2e3ae7742..31e795ae23 100644
--- a/docs/integrations/index.md
+++ b/docs/integrations/index.md
@@ -42,20 +42,20 @@ Welcome to the Ultralytics Integrations page! This page provides an overview of
 
 We also support a variety of model export formats for deployment in different environments. Here are the available formats:
 
-| Format | `format` Argument | Model | Metadata | Arguments |
-|-------------------------------------------------------------------|-------------------|---------------------------|----------|-----------------------------------------------------|
-| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - |
-| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize` |
-| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
-| [OpenVINO](openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
-| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
-| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | `imgsz`, `half`, `int8`, `nms` |
-| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
+| Format | `format` Argument | Model | Metadata | Arguments |
+|--------------------------------------------------------------------|-------------------|---------------------------|----------|-----------------------------------------------------|
+| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - |
+| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize` |
+| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
+| [OpenVINO](openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
+| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
+| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
 | [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
-| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
-| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` |
-| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz` |
-| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz` |
-| [NCNN](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half` |
+| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
+| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` |
+| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz` |
+| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz` |
+| [NCNN](https://github.com/Tencent/ncnn) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half` |
 
 Explore the links to learn more about each integration and how to get the most out of them with Ultralytics.
diff --git a/docs/modes/benchmark.md b/docs/modes/benchmark.md
index 3662beb247..53bcff0266 100644
--- a/docs/modes/benchmark.md
+++ b/docs/modes/benchmark.md
@@ -64,7 +64,7 @@ Benchmarks will attempt to run automatically on all possible export formats belo
 | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
 | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
 | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
-| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | `imgsz`, `half`, `int8`, `nms` |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
 | [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
 | [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
 | [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
diff --git a/docs/modes/export.md b/docs/modes/export.md
index 3e83c9e675..76a2505f86 100644
--- a/docs/modes/export.md
+++ b/docs/modes/export.md
@@ -78,7 +78,7 @@ i.e. `format='onnx'` or `format='engine'`.
 | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
 | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
 | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
-| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | `imgsz`, `half`, `int8`, `nms` |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
 | [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
 | [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
 | [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
diff --git a/docs/tasks/classify.md b/docs/tasks/classify.md
index 0c28b78426..c179df69bb 100644
--- a/docs/tasks/classify.md
+++ b/docs/tasks/classify.md
@@ -169,7 +169,7 @@ i.e. `yolo predict model=yolov8n-cls.onnx`. Usage examples are shown for your mo
 | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-cls.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
 | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-cls_openvino_model/` | ✅ | `imgsz`, `half` |
 | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-cls.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
-| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-cls.mlmodel` | ✅ | `imgsz`, `half`, `int8`, `nms` |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-cls.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
 | [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-cls_saved_model/` | ✅ | `imgsz`, `keras` |
 | [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-cls.pb` | ❌ | `imgsz` |
 | [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-cls.tflite` | ✅ | `imgsz`, `half`, `int8` |
diff --git a/docs/tasks/detect.md b/docs/tasks/detect.md
index df412a54f2..0b730c4d6a 100644
--- a/docs/tasks/detect.md
+++ b/docs/tasks/detect.md
@@ -160,7 +160,7 @@ Available YOLOv8 export formats are in the table below. You can predict or valid
 | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
 | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
 | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
-| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | `imgsz`, `half`, `int8`, `nms` |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
 | [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
 | [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
 | [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
diff --git a/docs/tasks/pose.md b/docs/tasks/pose.md
index 4ea44280cd..13643d1662 100644
--- a/docs/tasks/pose.md
+++ b/docs/tasks/pose.md
@@ -174,7 +174,7 @@ i.e. `yolo predict model=yolov8n-pose.onnx`. Usage examples are shown for your m
 | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-pose.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
 | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-pose_openvino_model/` | ✅ | `imgsz`, `half` |
 | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-pose.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
-| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-pose.mlmodel` | ✅ | `imgsz`, `half`, `int8`, `nms` |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-pose.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
 | [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-pose_saved_model/` | ✅ | `imgsz`, `keras` |
 | [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-pose.pb` | ❌ | `imgsz` |
 | [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-pose.tflite` | ✅ | `imgsz`, `half`, `int8` |
diff --git a/docs/tasks/segment.md b/docs/tasks/segment.md
index 696136a259..c8daabd291 100644
--- a/docs/tasks/segment.md
+++ b/docs/tasks/segment.md
@@ -174,7 +174,7 @@ i.e. `yolo predict model=yolov8n-seg.onnx`. Usage examples are shown for your mo
 | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-seg.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
 | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-seg_openvino_model/` | ✅ | `imgsz`, `half` |
 | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-seg.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
-| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-seg.mlmodel` | ✅ | `imgsz`, `half`, `int8`, `nms` |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-seg.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
 | [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-seg_saved_model/` | ✅ | `imgsz`, `keras` |
 | [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-seg.pb` | ❌ | `imgsz` |
 | [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-seg.tflite` | ✅ | `imgsz`, `half`, `int8` |
diff --git a/docs/usage/cli.md b/docs/usage/cli.md
index 227d6aa6fc..e08ef10a3e 100644
--- a/docs/usage/cli.md
+++ b/docs/usage/cli.md
@@ -172,7 +172,7 @@ i.e. `format='onnx'` or `format='engine'`.
 | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
 | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
 | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
-| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | `imgsz`, `half`, `int8`, `nms` |
+| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
 | [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
 | [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
 | [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
diff --git a/examples/tutorial.ipynb b/examples/tutorial.ipynb
index 23aab8d607..053c118a29 100644
--- a/examples/tutorial.ipynb
+++ b/examples/tutorial.ipynb
@@ -334,7 +334,7 @@
         "| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |\n",
         "| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |\n",
         "| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |\n",
-        "| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | `imgsz`, `half`, `int8`, `nms` |\n",
+        "| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |\n",
         "| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |\n",
         "| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |\n",
         "| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |\n",
diff --git a/requirements.txt b/requirements.txt
index 7a457ddade..53f9a01914 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -24,7 +24,7 @@ pandas>=1.1.4
 seaborn>=0.11.0
 
 # Export --------------------------------------
-# coremltools>=6.0,<=6.2  # CoreML export
+# coremltools>=7.0.b1  # CoreML export
 # onnx>=1.12.0  # ONNX export
 # onnxsim>=0.4.1  # ONNX simplifier
 # nvidia-pyindex  # TensorRT export
diff --git a/setup.py b/setup.py
index f315180038..23af63bca5 100644
--- a/setup.py
+++ b/setup.py
@@ -50,7 +50,7 @@ setup(
             'mkdocs-ultralytics-plugin>=0.0.25',  # for meta descriptions and images, dates and authors
         ],
         'export': [
-            'coremltools>=6.0,<=6.2',
+            'coremltools>=7.0.b1',
             'openvino-dev>=2023.0',
             'tensorflowjs',  # automatically installs tensorflow
         ],
     },
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index e2186dd080..8111704d7f 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = '8.0.149'
+__version__ = '8.0.150'
 
 from ultralytics.hub import start
 from ultralytics.models import RTDETR, SAM, YOLO
diff --git a/ultralytics/cfg/datasets/VOC.yaml b/ultralytics/cfg/datasets/VOC.yaml
index ab3438791a..6bdcc4f139 100644
--- a/ultralytics/cfg/datasets/VOC.yaml
+++ b/ultralytics/cfg/datasets/VOC.yaml
@@ -72,7 +72,7 @@ download: |
             xmlbox = obj.find('bndbox')
             bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
             cls_id = names.index(cls)  # class id
-            out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n')
+            out_file.write(" ".join(str(a) for a in (cls_id, *bb)) + '\n')
 
 
   # Download
diff --git a/ultralytics/engine/exporter.py b/ultralytics/engine/exporter.py
index 7cb784ce30..02cacf0dbd 100644
--- a/ultralytics/engine/exporter.py
+++ b/ultralytics/engine/exporter.py
@@ -9,7 +9,7 @@ TorchScript | `torchscript` | yolov8n.torchscript
 ONNX | `onnx` | yolov8n.onnx
 OpenVINO | `openvino` | yolov8n_openvino_model/
 TensorRT | `engine` | yolov8n.engine
-CoreML | `coreml` | yolov8n.mlmodel
+CoreML | `coreml` | yolov8n.mlpackage
 TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/
 TensorFlow GraphDef | `pb` | yolov8n.pb
 TensorFlow Lite | `tflite` | yolov8n.tflite
@@ -35,7 +35,7 @@ Inference:
     yolov8n.onnx  # ONNX Runtime or OpenCV DNN with dnn=True
     yolov8n_openvino_model  # OpenVINO
     yolov8n.engine  # TensorRT
-    yolov8n.mlmodel  # CoreML (macOS-only)
+    yolov8n.mlpackage  # CoreML (macOS-only)
     yolov8n_saved_model  # TensorFlow SavedModel
     yolov8n.pb  # TensorFlow GraphDef
     yolov8n.tflite  # TensorFlow Lite
@@ -82,7 +82,7 @@ def export_formats():
         ['ONNX', 'onnx', '.onnx', True, True],
         ['OpenVINO', 'openvino', '_openvino_model', True, False],
         ['TensorRT', 'engine', '.engine', False, True],
-        ['CoreML', 'coreml', '.mlmodel', True, False],
+        ['CoreML', 'coreml', '.mlpackage', True, False],
         ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
         ['TensorFlow GraphDef', 'pb', '.pb', True, True],
         ['TensorFlow Lite', 'tflite', '.tflite', True, False],
@@ -149,8 +149,10 @@ class Exporter:
         self.run_callbacks('on_export_start')
         t = time.time()
         format = self.args.format.lower()  # to lowercase
-        if format in ('tensorrt', 'trt'):  # engine aliases
+        if format in ('tensorrt', 'trt'):  # 'engine' aliases
             format = 'engine'
+        if format in ('mlmodel', 'mlpackage', 'mlprogram', 'apple', 'ios'):  # 'coreml' aliases
+            format = 'coreml'
         fmts = tuple(export_formats()['Argument'][1:])  # available export formats
         flags = [x == format for x in fmts]
         if sum(flags) != 1:
@@ -319,7 +321,7 @@ class Exporter:
             dynamic['output0'] = {0: 'batch', 2: 'anchors'}  # shape(1, 84, 8400)
 
         torch.onnx.export(
-            self.model.cpu() if dynamic else self.model,  # --dynamic only compatible with cpu
+            self.model.cpu() if dynamic else self.model,  # dynamic=True only compatible with cpu
             self.im.cpu() if dynamic else self.im,
             f,
             verbose=False,
@@ -461,14 +463,16 @@ class Exporter:
         yaml_save(f / 'metadata.yaml', self.metadata)  # add metadata.yaml
         return str(f), None
 
-    @try_export
     def export_coreml(self, prefix=colorstr('CoreML:')):
         """YOLOv8 CoreML export."""
-        check_requirements('coremltools>=6.0,<=6.2')
+        mlmodel = self.args.format.lower() == 'mlmodel'  # legacy *.mlmodel export format requested
+        check_requirements('coremltools>=6.0,<=6.2' if mlmodel else 'coremltools>=7.0.b1')
         import coremltools as ct  # noqa
 
         LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
-        f = self.file.with_suffix('.mlmodel')
+        f = self.file.with_suffix('.mlmodel' if mlmodel else '.mlpackage')
+        if f.is_dir():
+            shutil.rmtree(f)
 
         bias = [0.0, 0.0, 0.0]
         scale = 1 / 255
@@ -479,20 +483,38 @@ class Exporter:
         elif self.model.task == 'detect':
             model = iOSDetectModel(self.model, self.im) if self.args.nms else self.model
         else:
-            # TODO CoreML Segment and Pose model pipelining
+            if self.args.nms:
+                LOGGER.warning(f"{prefix} WARNING ⚠️ 'nms=True' is only available for Detect models like 'yolov8n.pt'.")
+                # TODO CoreML Segment and Pose model pipelining
             model = self.model
 
         ts = torch.jit.trace(model.eval(), self.im, strict=False)  # TorchScript model
         ct_model = ct.convert(ts,
                               inputs=[ct.ImageType('image', shape=self.im.shape, scale=scale, bias=bias)],
-                              classifier_config=classifier_config)
-        bits, mode = (8, 'kmeans_lut') if self.args.int8 else (16, 'linear') if self.args.half else (32, None)
+                              classifier_config=classifier_config,
+                              convert_to='neuralnetwork' if mlmodel else 'mlprogram')
+        bits, mode = (8, 'kmeans') if self.args.int8 else (16, 'linear') if self.args.half else (32, None)
         if bits < 32:
             if 'kmeans' in mode:
                 check_requirements('scikit-learn')  # scikit-learn package required for k-means quantization
-            ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
+            if mlmodel:
+                ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
+            else:
+                import coremltools.optimize.coreml as cto
+                op_config = cto.OpPalettizerConfig(mode=mode, nbits=bits, weight_threshold=512)
+                config = cto.OptimizationConfig(global_config=op_config)
+                ct_model = cto.palettize_weights(ct_model, config=config)
         if self.args.nms and self.model.task == 'detect':
-            ct_model = self._pipeline_coreml(ct_model)
+            if mlmodel:
+                import platform
+
+                # coremltools<=6.2 NMS export requires Python<3.11
+                check_version(platform.python_version(), '<3.11', name='Python ', hard=True)
+                weights_dir = None
+            else:
+                ct_model.save(str(f))  # save otherwise weights_dir does not exist
+                weights_dir = str(f / 'Data/com.apple.CoreML/weights')
+            ct_model = self._pipeline_coreml(ct_model, weights_dir=weights_dir)
 
         m = self.metadata  # metadata dict
         ct_model.short_description = m.pop('description')
@@ -500,7 +522,14 @@ class Exporter:
         ct_model.license = m.pop('license')
         ct_model.version = m.pop('version')
         ct_model.user_defined_metadata.update({k: str(v) for k, v in m.items()})
-        ct_model.save(str(f))
+        try:
+            ct_model.save(str(f))  # save *.mlpackage
+        except Exception as e:
+            LOGGER.warning(
+                f'{prefix} WARNING ⚠️ CoreML export to *.mlpackage failed ({e}), reverting to *.mlmodel export. '
+                f'Known coremltools Python 3.11 and Windows bugs https://github.com/apple/coremltools/issues/1928.')
+            f = f.with_suffix('.mlmodel')
+            ct_model.save(str(f))
         return f, ct_model
 
     @try_export
@@ -546,7 +575,7 @@ class Exporter:
             if self.args.dynamic:
                 shape = self.im.shape
                 if shape[0] <= 1:
-                    LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument')
+                    LOGGER.warning(f"{prefix} WARNING ⚠️ 'dynamic=True' model requires max batch size, i.e. 'batch=16'")
                 profile = builder.create_optimization_profile()
                 for inp in inputs:
                     profile.set_shape(inp.name, (1, *shape[1:]), (max(1, shape[0] // 2), *shape[1:]), shape)
@@ -805,7 +834,7 @@ class Exporter:
         populator.populate()
         tmp_file.unlink()
 
-    def _pipeline_coreml(self, model, prefix=colorstr('CoreML Pipeline:')):
+    def _pipeline_coreml(self, model, weights_dir=None, prefix=colorstr('CoreML Pipeline:')):
         """YOLOv8 CoreML pipeline."""
         import coremltools as ct  # noqa
 
@@ -853,7 +882,7 @@ class Exporter:
         # print(spec.description)
 
         # Model from spec
-        model = ct.models.MLModel(spec)
+        model = ct.models.MLModel(spec, weights_dir=weights_dir)
 
         # 3. Create NMS protobuf
         nms_spec = ct.proto.Model_pb2.Model()
@@ -912,7 +941,7 @@ class Exporter:
                                            'Confidence threshold': str(nms.confidenceThreshold)})
 
         # Save the model
-        model = ct.models.MLModel(pipeline.spec)
+        model = ct.models.MLModel(pipeline.spec, weights_dir=weights_dir)
         model.input_description['image'] = 'Input image'
         model.input_description['iouThreshold'] = f'(optional) IOU threshold override (default: {nms.iouThreshold})'
         model.input_description['confidenceThreshold'] = \
diff --git a/ultralytics/engine/model.py b/ultralytics/engine/model.py
index 8a8d59721f..a6028de310 100644
--- a/ultralytics/engine/model.py
+++ b/ultralytics/engine/model.py
@@ -320,7 +320,7 @@ class Model:
                          half=overrides['half'],
                          int8=overrides['int8'],
                          device=overrides['device'],
-                         verbose=overrides['verbose'])
+                         verbose=kwargs.get('verbose'))
 
     def export(self, **kwargs):
         """
diff --git a/ultralytics/engine/predictor.py b/ultralytics/engine/predictor.py
index d6b8f694cf..6406d18878 100644
--- a/ultralytics/engine/predictor.py
+++ b/ultralytics/engine/predictor.py
@@ -20,7 +20,7 @@ Usage - formats:
     yolov8n.onnx  # ONNX Runtime or OpenCV DNN with dnn=True
     yolov8n_openvino_model  # OpenVINO
     yolov8n.engine  # TensorRT
-    yolov8n.mlmodel  # CoreML (macOS-only)
+    yolov8n.mlpackage  # CoreML (macOS-only)
     yolov8n_saved_model  # TensorFlow SavedModel
     yolov8n.pb  # TensorFlow GraphDef
     yolov8n.tflite  # TensorFlow Lite
diff --git a/ultralytics/engine/validator.py b/ultralytics/engine/validator.py
index 2b08912cd4..1551cc38b7 100644
--- a/ultralytics/engine/validator.py
+++ b/ultralytics/engine/validator.py
@@ -11,7 +11,7 @@ Usage - formats:
    yolov8n.onnx  # ONNX Runtime or OpenCV DNN with dnn=True
    yolov8n_openvino_model  # OpenVINO
    yolov8n.engine  # TensorRT
-    yolov8n.mlmodel  # CoreML (macOS-only)
+    yolov8n.mlpackage  # CoreML (macOS-only)
    yolov8n_saved_model  # TensorFlow SavedModel
    yolov8n.pb  # TensorFlow GraphDef
    yolov8n.tflite  # TensorFlow Lite
diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py
index df41dec6ba..e1c7ea8226 100644
--- a/ultralytics/nn/autobackend.py
+++ b/ultralytics/nn/autobackend.py
@@ -68,7 +68,7 @@ class AutoBackend(nn.Module):
             | ONNX Runtime | *.onnx |
             | ONNX OpenCV DNN | *.onnx dnn=True |
             | OpenVINO | *.xml |
-            | CoreML | *.mlmodel |
+            | CoreML | *.mlpackage |
             | TensorRT | *.engine |
             | TensorFlow SavedModel | *_saved_model |
             | TensorFlow GraphDef | *.pb |
@@ -485,8 +485,13 @@ class AutoBackend(nn.Module):
         sf = list(export_formats().Suffix)  # export suffixes
         if not is_url(p, check=False) and not isinstance(p, str):
             check_suffix(p, sf)  # checks
-        url = urlparse(p)  # if url may be Triton inference server
-        types = [s in Path(p).name for s in sf]
+        name = Path(p).name
+        types = [s in name for s in sf]
+        types[5] |= name.endswith('.mlmodel')  # retain support for older Apple CoreML *.mlmodel formats
         types[8] &= not types[9]  # tflite &= not edgetpu
-        triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])
+        if any(types):
+            triton = False
+        else:
+            url = urlparse(p)  # if url may be Triton inference server
+            triton = all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])
         return types + [triton]
diff --git a/ultralytics/utils/benchmarks.py b/ultralytics/utils/benchmarks.py
index 7dfadce574..669f596c70 100644
--- a/ultralytics/utils/benchmarks.py
+++ b/ultralytics/utils/benchmarks.py
@@ -14,7 +14,7 @@ TorchScript | `torchscript` | yolov8n.torchscript
 ONNX | `onnx` | yolov8n.onnx
 OpenVINO | `openvino` | yolov8n_openvino_model/
 TensorRT | `engine` | yolov8n.engine
-CoreML | `coreml` | yolov8n.mlmodel
+CoreML | `coreml` | yolov8n.mlpackage
 TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/
 TensorFlow GraphDef | `pb` | yolov8n.pb
 TensorFlow Lite | `tflite` | yolov8n.tflite
diff --git a/ultralytics/utils/checks.py b/ultralytics/utils/checks.py
index 9cc02d586c..1c4d05ce51 100644
--- a/ultralytics/utils/checks.py
+++ b/ultralytics/utils/checks.py
@@ -265,20 +265,21 @@ def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=()
     elif isinstance(requirements, str):
         requirements = [requirements]
 
-    s = ''  # console string
     pkgs = []
     for r in requirements:
         r_stripped = r.split('/')[-1].replace('.git', '')  # replace git+https://org/repo.git -> 'repo'
         try:
-            pkg.require(r_stripped)
-        except (pkg.VersionConflict, pkg.DistributionNotFound):  # exception if requirements not met
+            pkg.require(r_stripped)  # exception if requirements not met
+        except pkg.DistributionNotFound:
             try:  # attempt to import (slower but more accurate)
                 import importlib
                 importlib.import_module(next(pkg.parse_requirements(r_stripped)).name)
             except ImportError:
-                s += f'"{r}" '
                 pkgs.append(r)
+        except pkg.VersionConflict:
+            pkgs.append(r)
 
+    s = ' '.join(f'"{x}"' for x in pkgs)  # console string
     if s:
         if install and AUTOINSTALL:  # check environment variable
            n = len(pkgs)  # number of packages updates
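
A minimal usage sketch of the export path changed above, for reference only. It assumes ultralytics>=8.0.150 with a compatible coremltools>=7.0 build installed; the yolov8n.pt weights and the format/nms arguments come from the format tables in this diff, and exact output names may vary by task and working directory.

    # Hedged example (not part of the commit): exercise the new CoreML ML Program export.
    # CLI equivalent: yolo export model=yolov8n.pt format=coreml nms=True
    from ultralytics import YOLO

    model = YOLO('yolov8n.pt')
    model.export(format='coreml', nms=True)  # writes yolov8n.mlpackage; nms=True builds the NMS pipeline for Detect models
    # Passing format='mlmodel' (one of the new 'coreml' aliases) should instead select the
    # legacy *.mlmodel exporter, which pins coremltools>=6.0,<=6.2.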