diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 0af9318e4b..e3b2bea301 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -17,7 +17,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest]
+        os: [ubuntu-latest, windows-latest, macos-latest]
         python-version: ['3.10'] # requires python<=3.9
         model: [yolov8n]
     steps:
@@ -34,7 +34,8 @@ jobs:
       - name: Install requirements
         run: |
           python -m pip install --upgrade pip wheel
-          pip install -e '.[export]' --extra-index-url https://download.pytorch.org/whl/cpu
+          pip install -e . coremltools openvino-dev tensorflow-cpu paddlepaddle x2paddle --extra-index-url https://download.pytorch.org/whl/cpu
+          yolo export format=tflite
       - name: Check environment
         run: |
           echo "RUNNER_OS is ${{ runner.os }}"
@@ -46,29 +47,21 @@ jobs:
           python --version
           pip --version
           pip list
-      - name: TF Lite export
-        run: |
-          yolo export model=${{ matrix.model }}.pt format=tflite
-          yolo task=detect mode=predict model=yolov8n_saved_model/yolov8n_float16.tflite imgsz=640
-      - name: TF *.pb export
-        run: |
-          yolo export model=${{ matrix.model }}.pt format=pb
-          yolo task=detect mode=predict model=yolov8n.pb imgsz=640
-      - name: TF Lite Edge TPU export
-        run: |
-          yolo export model=${{ matrix.model }}.pt format=edgetpu
-      - name: TF.js export
-        run: |
-          yolo export model=${{ matrix.model }}.pt format=tfjs
       - name: Benchmark DetectionModel
+        shell: python
         run: |
-          # yolo benchmark model=${{ matrix.model }}.pt imgsz=320 min_metric=0.29
+          from ultralytics.yolo.utils.benchmarks import benchmark
+          benchmark(model='yolov8n.pt', imgsz=160, half=False, hard_fail=False)
       - name: Benchmark SegmentationModel
+        shell: python
         run: |
-          # yolo benchmark model=${{ matrix.model }}-seg.pt imgsz=320 min_metric=0.29
+          from ultralytics.yolo.utils.benchmarks import benchmark
+          benchmark(model='yolov8n-seg.pt', imgsz=160, half=False, hard_fail=False)
      - name: Benchmark ClassificationModel
+        shell: python
         run: |
-          # yolo benchmark model=${{ matrix.model }}-cls.pt imgsz=224 min_metric=0.29
+          from ultralytics.yolo.utils.benchmarks import benchmark
+          benchmark(model='yolov8n-cls.pt', imgsz=160, half=False, hard_fail=False)
 
   Tests:
     timeout-minutes: 60
@@ -76,7 +69,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest]
+        os: [ubuntu-latest]
         python-version: ['3.10']
         model: [yolov8n]
         torch: [latest]
@@ -155,9 +148,9 @@ jobs:
       - name: Test classification
         shell: bash # for Windows compatibility
         run: |
-          yolo task=classify mode=train data=mnist160 model=yolov8n-cls.yaml epochs=1 imgsz=32
-          yolo task=classify mode=train data=mnist160 model=yolov8n-cls.pt epochs=1 imgsz=32
-          yolo task=classify mode=val data=mnist160 model=runs/classify/train/weights/last.pt imgsz=32
+          yolo task=classify mode=train data=imagenet10 model=yolov8n-cls.yaml epochs=1 imgsz=32
+          yolo task=classify mode=train data=imagenet10 model=yolov8n-cls.pt epochs=1 imgsz=32
+          yolo task=classify mode=val data=imagenet10 model=runs/classify/train/weights/last.pt imgsz=32
           yolo task=classify mode=predict model=runs/classify/train/weights/last.pt imgsz=32 source=ultralytics/assets/bus.jpg
           yolo mode=export model=runs/classify/train/weights/last.pt imgsz=32 format=torchscript
       - name: Pytest tests
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 82b1fcee8f..dd05788b59 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -59,7 +59,9 @@ To allow your work to be integrated as seamlessly as possible, we advise you to:
 
 ### Docstrings
 
-Not all functions or classes require docstrings but when they do, we follow [google-style docstrings format](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings). Here is an example:
+Not all functions or classes require docstrings but when they do, we
+follow the [google-style docstrings format](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings).
+Here is an example:
 
 ```python
 """
diff --git a/README.md b/README.md
index 3cddc03f14..eae7e0a405 100644
--- a/README.md
+++ b/README.md
@@ -222,7 +222,10 @@ See [Classification Docs](https://docs.ultralytics.com/tasks/classification/) fo
 
 ##
Ultralytics HUB
-Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 (coming soon) 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! +Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data +visualization, YOLOv5 and YOLOv8 (coming soon) 🚀 model training and deployment, without any coding. Transform images +into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and +user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! @@ -250,7 +253,9 @@ YOLOv8 is available under two different licenses: ##
Contact
-For YOLOv8 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues) or the [Ultralytics Community Forum](https://community.ultralytics.com/). +For YOLOv8 bug reports and feature requests please +visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues) or +the [Ultralytics Community Forum](https://community.ultralytics.com/).
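Aside: the reworked Benchmarks job in ci.yaml above drives everything through `ultralytics.yolo.utils.benchmarks` instead of the removed per-format TF export steps. A minimal local sketch of what each `shell: python` step runs, assuming the `benchmark` helper and its `hard_fail` keyword behave as the workflow uses them:

```python
# Local equivalent of the CI 'Benchmark DetectionModel' step (a sketch; the
# benchmark() helper and its hard_fail flag are taken from ci.yaml above).
from ultralytics.yolo.utils.benchmarks import benchmark

# Exports the model to each supported format, runs a short inference pass per
# format, and prints a speed/accuracy table; hard_fail=False records failed
# formats instead of raising.
benchmark(model='yolov8n.pt', imgsz=160, half=False, hard_fail=False)
```

The segmentation and classification steps are identical apart from the `yolov8n-seg.pt` and `yolov8n-cls.pt` weights.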
diff --git a/README.zh-CN.md b/README.zh-CN.md index e9ec5856e0..13aede8fd1 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -18,7 +18,9 @@

-[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) 是由 [Ultralytics](https://ultralytics.com) 开发的一个前沿的 SOTA 模型。它在以前成功的 YOLO 版本基础上,引入了新的功能和改进,进一步提升了其性能和灵活性。YOLOv8 基于快速、准确和易于使用的设计理念,使其成为广泛的目标检测、图像分割和图像分类任务的绝佳选择。 +[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) 是由 [Ultralytics](https://ultralytics.com) 开发的一个前沿的 +SOTA 模型。它在以前成功的 YOLO 版本基础上,引入了新的功能和改进,进一步提升了其性能和灵活性。YOLOv8 +基于快速、准确和易于使用的设计理念,使其成为广泛的目标检测、图像分割和图像分类任务的绝佳选择。 如果要申请企业许可证,请填写 [Ultralytics 许可](https://ultralytics.com/license)。 @@ -53,7 +55,9 @@
安装
 
-Pip 安装包含所有 [requirements.txt](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) 的 ultralytics 包,环境要求 [**Python>=3.7**](https://www.python.org/),且 [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/)。
+Pip 安装包含所有 [requirements.txt](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) 的
+ultralytics 包,环境要求 [**Python>=3.7**](https://www.python.org/),
+且 [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/)。
 
 ```bash
 pip install ultralytics
 ```
@@ -70,7 +74,8 @@
 YOLOv8 可以直接在命令行界面(CLI)中使用 `yolo` 命令运行:
 
 yolo predict model=yolov8n.pt source="https://ultralytics.com/images/bus.jpg"
 ```
 
-`yolo`可以用于各种任务和模式,并接受额外的参数,例如 `imgsz=640`。参见 YOLOv8 [文档](https://docs.ultralytics.com)中可用`yolo`[参数](https://docs.ultralytics.com/cfg/)的完整列表。
+`yolo`可以用于各种任务和模式,并接受额外的参数,例如 `imgsz=640`。参见 YOLOv8 [文档](https://docs.ultralytics.com)
+中可用`yolo`[参数](https://docs.ultralytics.com/cfg/)的完整列表。
 
 ```bash
 yolo task=detect mode=train model=yolov8n.pt args...
@@ -95,11 +100,13 @@
 results = model("https://ultralytics.com/images/bus.jpg")  # 预测图像
 success = model.export(format="onnx")  # 将模型导出为 ONNX 格式
 ```
 
-[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) 会从 Ultralytics [发布页](https://github.com/ultralytics/ultralytics/releases) 自动下载。
+[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) 会从
+Ultralytics [发布页](https://github.com/ultralytics/ultralytics/releases) 自动下载。
 
 ### 已知问题 / 待办事项
 
-我们仍在努力完善 YOLOv8 的几个部分!我们的目标是尽快完成这些工作,使 YOLOv8 的功能设置达到YOLOv5 的水平,包括对所有相同格式的导出和推理。我们还在写一篇 YOLOv8 的论文,一旦完成,我们将提交给 [arxiv.org](https://arxiv.org)。
+我们仍在努力完善 YOLOv8 的几个部分!我们的目标是尽快完成这些工作,使 YOLOv8 的功能设置达到 YOLOv5
+的水平,包括对所有相同格式的导出和推理。我们还在写一篇 YOLOv8 的论文,一旦完成,我们将提交给 [arxiv.org](https://arxiv.org)。
 
 - [x] TensorFlow 导出
 - [x] DDP 恢复训练
@@ -111,7 +118,8 @@ success = model.export(format="onnx")  # 将模型导出为 ONNX 格式
 
 所有 YOLOv8 的预训练模型都可以在这里找到。目标检测和分割模型是在 COCO 数据集上预训练的,而分类模型是在 ImageNet 数据集上预训练的。
 
-第一次使用时,[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) 会从 Ultralytics [发布页](https://github.com/ultralytics/ultralytics/releases) 自动下载。
+第一次使用时,[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) 会从
+Ultralytics [发布页](https://github.com/ultralytics/ultralytics/releases) 自动下载。
目标检测 @@ -125,7 +133,8 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式 - **mAPval** 结果都在 [COCO val2017](http://cocodataset.org) 数据集上,使用单模型单尺度测试得到。
复现命令 `yolo val detect data=coco.yaml device=0` -- **推理速度**使用 COCO 验证集图片推理时间进行平均得到,测试环境使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例。 +- **推理速度**使用 COCO + 验证集图片推理时间进行平均得到,测试环境使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例。
复现命令 `yolo val detect data=coco128.yaml batch=1 device=0/cpu`
@@ -142,7 +151,8 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式 - **mAPval** 结果都在 [COCO val2017](http://cocodataset.org) 数据集上,使用单模型单尺度测试得到。
复现命令 `yolo val segment data=coco.yaml device=0` -- **推理速度**使用 COCO 验证集图片推理时间进行平均得到,测试环境使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例。 +- **推理速度**使用 COCO + 验证集图片推理时间进行平均得到,测试环境使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例。
复现命令 `yolo val segment data=coco128-seg.yaml batch=1 device=0/cpu`
@@ -159,7 +169,8 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式 - **acc** 都在 [ImageNet](https://www.image-net.org/) 数据集上,使用单模型单尺度测试得到。
复现命令 `yolo val classify data=path/to/ImageNet device=0` -- **推理速度**使用 ImageNet 验证集图片推理时间进行平均得到,测试环境使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例。 +- **推理速度**使用 ImageNet + 验证集图片推理时间进行平均得到,测试环境使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例。
复现命令 `yolo val classify data=path/to/ImageNet batch=1 device=0/cpu` @@ -192,14 +203,18 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式 ##
Ultralytics HUB
-[Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们⭐ **新**的无代码解决方案,用于可视化数据集,训练 YOLOv8🚀 模型,并以无缝体验方式部署到现实世界。现在开始**免费**! 还可以通过下载 [Ultralytics App](https://ultralytics.com/app_install) 在你的 iOS 或 Android 设备上运行 YOLOv8 模型! +[Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们⭐ **新**的无代码解决方案,用于可视化数据集,训练 YOLOv8🚀 +模型,并以无缝体验方式部署到现实世界。现在开始**免费**! +还可以通过下载 [Ultralytics App](https://ultralytics.com/app_install) 在你的 iOS 或 Android 设备上运行 YOLOv8 模型! ##
贡献
-我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv8 做出贡献。请看我们的 [贡献指南](CONTRIBUTING.md) ,并填写 [调查问卷](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! +我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv8 做出贡献。请看我们的 [贡献指南](CONTRIBUTING.md) +,并填写 [调查问卷](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) +向我们发送您的体验反馈。感谢我们所有的贡献者! @@ -210,11 +225,13 @@ success = model.export(format="onnx") # 将模型导出为 ONNX 格式 - YOLOv8 在两种不同的 License 下可用: - **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) 文件的详细信息。 - - **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 + - **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI + 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 ##
联系我们
-请访问 [GitHub Issues](https://github.com/ultralytics/ultralytics/issues) 或 [Ultralytics Community Forum](https://community.ultralytis.com) 以报告 YOLOv8 错误和请求功能。
+请访问 [GitHub Issues](https://github.com/ultralytics/ultralytics/issues)
+或 [Ultralytics Community Forum](https://community.ultralytics.com) 以报告 YOLOv8 错误和请求功能。
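Aside: the Python quickstart re-wrapped in the hunks above reduces to the flow below. The predict and export lines appear verbatim in the hunk; the train and val calls are assumptions following the usual README quickstart pattern:

```python
# Sketch of the README.zh-CN.md Python quickstart referenced above.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")                                 # load a pretrained model
model.train(data="coco128.yaml", epochs=3)                 # train (assumed arguments)
metrics = model.val()                                      # validate; reuses the training data
results = model("https://ultralytics.com/images/bus.jpg")  # 预测图像 (predict on an image)
success = model.export(format="onnx")                      # 将模型导出为 ONNX 格式 (export to ONNX)
```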
diff --git a/docker/Dockerfile b/docker/Dockerfile index 92d7fd3140..89964c1c2b 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -13,7 +13,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria ENV DEBIAN_FRONTEND noninteractive RUN apt update RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg g++ # RUN alias python=python3 # Security updates diff --git a/docker/Dockerfile-cpu b/docker/Dockerfile-cpu index 10b73d7350..2a93c55c15 100644 --- a/docker/Dockerfile-cpu +++ b/docker/Dockerfile-cpu @@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria ENV DEBIAN_FRONTEND noninteractive RUN apt update RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg g++ # RUN alias python=python3 # Create working directory diff --git a/docs/SECURITY.md b/docs/SECURITY.md index 5833ea78b4..c00e145838 100644 --- a/docs/SECURITY.md +++ b/docs/SECURITY.md @@ -1,17 +1,26 @@ -At [Ultralytics](https://ultralytics.com), the security of our users' data and systems is of utmost importance. To ensure the safety and security of our [open-source projects](https://github.com/ultralytics), we have implemented several measures to detect and prevent security vulnerabilities. +At [Ultralytics](https://ultralytics.com), the security of our users' data and systems is of utmost importance. To +ensure the safety and security of our [open-source projects](https://github.com/ultralytics), we have implemented +several measures to detect and prevent security vulnerabilities. [![ultralytics](https://snyk.io/advisor/python/ultralytics/badge.svg)](https://snyk.io/advisor/python/ultralytics) ## Snyk Scanning -We use [Snyk](https://snyk.io/advisor/python/ultralytics) to regularly scan the YOLOv8 repository for vulnerabilities and security issues. Our goal is to identify and remediate any potential threats as soon as possible, to minimize any risks to our users. +We use [Snyk](https://snyk.io/advisor/python/ultralytics) to regularly scan the YOLOv8 repository for vulnerabilities +and security issues. Our goal is to identify and remediate any potential threats as soon as possible, to minimize any +risks to our users. ## GitHub CodeQL Scanning -In addition to our Snyk scans, we also use GitHub's [CodeQL](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/about-code-scanning-with-codeql) scans to proactively identify and address security vulnerabilities. +In addition to our Snyk scans, we also use +GitHub's [CodeQL](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/about-code-scanning-with-codeql) +scans to proactively identify and address security vulnerabilities. ## Reporting Security Issues -If you suspect or discover a security vulnerability in the YOLOv8 repository, please let us know immediately. You can reach out to us directly via our [contact form](https://ultralytics.com/contact) or via [security@ultralytics.com](mailto:security@ultralytics.com). 
Our security team will investigate and respond as soon as possible. +If you suspect or discover a security vulnerability in the YOLOv8 repository, please let us know immediately. You can +reach out to us directly via our [contact form](https://ultralytics.com/contact) or +via [security@ultralytics.com](mailto:security@ultralytics.com). Our security team will investigate and respond as soon +as possible. We appreciate your help in keeping the YOLOv8 repository secure and safe for everyone. diff --git a/docs/app.md b/docs/app.md index e6a45aa651..6870874032 100644 --- a/docs/app.md +++ b/docs/app.md @@ -35,7 +35,7 @@
 Welcome to the Ultralytics HUB app for demonstrating YOLOv5 and YOLOv8 models! In this app, available on the [Apple App
-Store](https://apps.apple.com/xk/app/ultralytics/id1583935240) and the 
+Store](https://apps.apple.com/xk/app/ultralytics/id1583935240) and the
 [Google Play Store](https://play.google.com/store/apps/details?id=com.ultralytics.ultralytics_app), you will be able
 to see the power and capabilities of YOLOv5, a state-of-the-art object detection model developed by Ultralytics.
diff --git a/docs/callbacks.md b/docs/callbacks.md
index 5dce9b0e16..4a3834f97d 100644
--- a/docs/callbacks.md
+++ b/docs/callbacks.md
@@ -1,10 +1,15 @@
 ## Callbacks
-Ultralytics framework supports callbacks as entry points in strategic stages of train, val, export, and predict modes. Each callback accepts a `Trainer`, `Validator`, or `Predictor` object depending on the operation type. All properties of these objects can be found in Reference section of the docs.
+
+The Ultralytics framework supports callbacks as entry points in strategic stages of train, val, export, and predict modes.
+Each callback accepts a `Trainer`, `Validator`, or `Predictor` object depending on the operation type. All properties of
+these objects can be found in the Reference section of the docs.
 
 ## Examples
 
 ### Returning additional information with Prediction
+
 In this example, we want to return the original frame with each result object. Here's how we can do that
+
 ```python
 def on_predict_batch_end(predictor):
     # results -> List[batch_size]
@@ -19,8 +24,11 @@ for (result, frame) in model.track/predict():
 ```
 
 ## All callbacks
+
 Here are all supported callbacks.
+
 ### Trainer
+
 `on_pretrain_routine_start`
 
 `on_pretrain_routine_end`
@@ -50,6 +58,7 @@ Here are all supported callbacks.
 `teardown`
 
 ### Validator
+
 `on_val_start`
 
 `on_val_batch_start`
@@ -59,6 +68,7 @@ Here are all supported callbacks.
 `on_val_end`
 
 ### Predictor
+
 `on_predict_start`
 
 `on_predict_batch_start`
@@ -70,6 +80,7 @@ Here are all supported callbacks.
 `on_predict_end`
 
 ### Exporter
+
 `on_export_start`
 
 `on_export_end`
diff --git a/docs/cli.md b/docs/cli.md
index 327ee53947..0c32246f6f 100644
--- a/docs/cli.md
+++ b/docs/cli.md
@@ -39,17 +39,19 @@ the [Configuration](cfg.md) page.
     yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640
     yolo detect train resume model=last.pt  # resume training
     ```
+
 ## Val
 
 Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need to passed as the `model` retains it's
 training `data` and arguments as model attributes.
 
 !!! example ""
-    
+
     ```bash
     yolo detect val model=yolov8n.pt  # val official model
     yolo detect val model=path/to/best.pt  # val custom model
     ```
+
 ## Predict
 
 Use a trained YOLOv8n model to run predictions on images.
@@ -60,12 +62,13 @@ Use a trained YOLOv8n model to run predictions on images.
     ```bash
     yolo detect predict model=yolov8n.pt source="https://ultralytics.com/images/bus.jpg"  # predict with official model
     yolo detect predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg"  # predict with custom model
     ```
+
 ## Export
 
 Export a YOLOv8n model to a different format like ONNX, CoreML, etc.
 
 !!! example ""
-    
+
     ```bash
     yolo export model=yolov8n.pt format=onnx  # export official model
     yolo export model=path/to/best.pt format=onnx  # export custom trained model
diff --git a/docs/hub.md b/docs/hub.md
index 1ae00b1ca5..cf451300b6 100644
--- a/docs/hub.md
+++ b/docs/hub.md
@@ -98,7 +98,7 @@ Click 'Upload Dataset' to upload, scan and visualize your new dataset before tra
 
 ## 2. Train a Model
 
-Connect to the Ultralytics HUB notebook and use your model API key to begin training! 
+Connect to the Ultralytics HUB notebook and use your model API key to begin training!
 
 Open In Colab
 
@@ -106,8 +106,8 @@ Connect to the Ultralytics HUB notebook and use your model API key to begin trai
 ## 3. Deploy to Real World
 
 Export your model to 13 different formats, including TensorFlow, ONNX, OpenVINO, CoreML, Paddle and many others. Run
-models directly on your [iOS](https://apps.apple.com/xk/app/ultralytics/id1583935240) or 
-[Android](https://play.google.com/store/apps/details?id=com.ultralytics.ultralytics_app) mobile device by downloading 
+models directly on your [iOS](https://apps.apple.com/xk/app/ultralytics/id1583935240) or
+[Android](https://play.google.com/store/apps/details?id=com.ultralytics.ultralytics_app) mobile device by downloading
 the [Ultralytics App](https://ultralytics.com/app_install)!
 
 ## ❓ Issues
diff --git a/docs/predict.md b/docs/predict.md
index 569bad8448..be3c3a7ee7 100644
--- a/docs/predict.md
+++ b/docs/predict.md
@@ -96,12 +96,15 @@ Class reference documentation for `Results` module and its components can be fou
 
 ## Visualizing results
 
-You can use `visualize()` function of `Result` object to get a visualization. It plots all components(boxes, masks, classification logits, etc) found in the results object
+You can use the `visualize()` function of the `Result` object to get a visualization. It plots all components (boxes,
+masks, classification logits, etc.) found in the results object.
+
 ```python
 res = model(img)
 res_plotted = res[0].visualize()
 cv2.imshow("result", res_plotted)
 ```
+
 !!! example "`visualize()` arguments"
 
     `show_conf (bool)`: Show confidence
diff --git a/examples/README.md b/examples/README.md
index b8aaf39f54..c7ba2daa43 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -4,7 +4,7 @@ This is a list of real-world applications and walkthroughs. These can be folders
 
 | Title                                                           | Format             | Contributor                                          |
 | --------------------------------------------------------------- | ------------------ | --------------------------------------------------- |
-| [Yolov8/yolov5 ONNX Inference with C++](./Yolov8_CPP_Inference) | C++/ONNX           | [Justas Bartnykas](https://github.com/JustasBart)   |
+| [Yolov8/yolov5 ONNX Inference with C++](./YOLOv8-CPP-Inference) | C++/ONNX           | [Justas Bartnykas](https://github.com/JustasBart)   |
 | [YOLOv8-OpenCV-ONNX-Python](./YOLOv8-OpenCV-ONNX-Python)        | OpenCV/Python/ONNX | [Farid Inawan](https://github.com/frdteknikelektro) |
 
 ## How can you contribute ?
diff --git a/examples/Yolov8_CPP_Inference/CMakeLists.txt b/examples/YOLOv8-CPP-Inference/CMakeLists.txt
similarity index 100%
rename from examples/Yolov8_CPP_Inference/CMakeLists.txt
rename to examples/YOLOv8-CPP-Inference/CMakeLists.txt
diff --git a/examples/Yolov8_CPP_Inference/README.md b/examples/YOLOv8-CPP-Inference/README.md
similarity index 71%
rename from examples/Yolov8_CPP_Inference/README.md
rename to examples/YOLOv8-CPP-Inference/README.md
index b1381467d3..4eca0ce41a 100644
--- a/examples/Yolov8_CPP_Inference/README.md
+++ b/examples/YOLOv8-CPP-Inference/README.md
@@ -48,6 +48,10 @@ yolov5s.onnx:
 
 ![image](https://user-images.githubusercontent.com/40023722/217357005-07464492-d1da-42e3-98a7-fc753f87d5e6.png)
 
-This repository is based on OpenCVs dnn API to run an ONNX exported model of either yolov5/yolov8 (In theory should work for yolov6 and yolov7 but not tested). Note that for this example the networks are exported as rectangular (640x480) resolutions, but it would work for any resolution that you export as although you might want to use the letterBox approach for square images depending on your use-case.
+This repository is based on OpenCV's DNN API to run an ONNX exported model of either yolov5/yolov8 (in theory it
+should work for yolov6 and yolov7, but this is untested). Note that for this example the networks are exported as
+rectangular (640x480) resolutions, but it would work for any resolution that you export, although you might want to
+use the letterBox approach for square images depending on your use-case.
 
-The **main** branch version is based on using Qt as a GUI wrapper the main interest here is the **Inference** class file which shows how to transpose yolov8 models to work as yolov5 models.
+The **main** branch version uses Qt as a GUI wrapper; the main interest here is the **Inference** class file,
+which shows how to transpose yolov8 models to work as yolov5 models.
diff --git a/examples/Yolov8_CPP_Inference/inference.cpp b/examples/YOLOv8-CPP-Inference/inference.cpp
similarity index 100%
rename from examples/Yolov8_CPP_Inference/inference.cpp
rename to examples/YOLOv8-CPP-Inference/inference.cpp
diff --git a/examples/Yolov8_CPP_Inference/inference.h b/examples/YOLOv8-CPP-Inference/inference.h
similarity index 100%
rename from examples/Yolov8_CPP_Inference/inference.h
rename to examples/YOLOv8-CPP-Inference/inference.h
diff --git a/examples/Yolov8_CPP_Inference/main.cpp b/examples/YOLOv8-CPP-Inference/main.cpp
similarity index 100%
rename from examples/Yolov8_CPP_Inference/main.cpp
rename to examples/YOLOv8-CPP-Inference/main.cpp
diff --git a/examples/tutorial.ipynb b/examples/tutorial.ipynb
index c16502be10..b2685e4ae1 100644
--- a/examples/tutorial.ipynb
+++ b/examples/tutorial.ipynb
@@ -49,7 +49,7 @@
       "source": [
         "# Setup\n",
         "\n",
-        "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) and check PyTorch and GPU."
+        "Pip install `ultralytics` and [dependencies](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) and check software and hardware."
       ]
     },
     {
@@ -62,7 +62,6 @@
         "outputId": "9bda69d4-e57f-404b-b6fe-117234e24677"
       },
       "source": [
-        "# Pip install method (recommended)\n",
         "%pip install ultralytics\n",
         "import ultralytics\n",
         "ultralytics.checks()"
@@ -80,17 +79,11 @@
       ]
     },
     {
-      "cell_type": "code",
-      "source": [
-        "# Git clone method (for development)\n",
-        "!git clone https://github.com/ultralytics/ultralytics\n",
-        "%pip install -qe ultralytics"
-      ],
+      "cell_type": "markdown",
+      "source": [],
       "metadata": {
-        "id": "TUFPge7f_1ms"
-      },
-      "execution_count": null,
-      "outputs": []
+        "id": "ZOwTlorPd8-D"
+      }
     },
     {
       "cell_type": "markdown",
@@ -655,6 +648,19 @@
         "Additional content below."
] }, + { + "cell_type": "code", + "source": [ + "# Git clone install (for development)\n", + "!git clone https://github.com/ultralytics/ultralytics\n", + "%pip install -qe ultralytics" + ], + "metadata": { + "id": "uRKlwxSJdhd1" + }, + "execution_count": null, + "outputs": [] + }, { "cell_type": "code", "metadata": { diff --git a/tests/test_cli.py b/tests/test_cli.py index 1845b71478..57ac03176d 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -30,7 +30,7 @@ def test_train_seg(): def test_train_cls(): - run(f'yolo train classify model={CFG}-cls.yaml data=mnist160 imgsz=32 epochs=1') + run(f'yolo train classify model={CFG}-cls.yaml data=imagenet10 imgsz=32 epochs=1') # Val checks ----------------------------------------------------------------------------------------------------------- @@ -43,7 +43,7 @@ def test_val_segment(): def test_val_classify(): - run(f'yolo val classify model={MODEL}-cls.pt data=mnist160 imgsz=32') + run(f'yolo val classify model={MODEL}-cls.pt data=imagenet10 imgsz=32') # Predict checks ------------------------------------------------------------------------------------------------------- diff --git a/tests/test_engine.py b/tests/test_engine.py index 25c20c2db9..52c68c0892 100644 --- a/tests/test_engine.py +++ b/tests/test_engine.py @@ -74,8 +74,14 @@ def test_segment(): def test_classify(): - overrides = {'data': 'mnist160', 'model': 'yolov8n-cls.yaml', 'imgsz': 32, 'epochs': 1, 'batch': 64, 'save': False} - CFG.data = 'mnist160' + overrides = { + 'data': 'imagenet10', + 'model': 'yolov8n-cls.yaml', + 'imgsz': 32, + 'epochs': 1, + 'batch': 64, + 'save': False} + CFG.data = 'imagenet10' CFG.imgsz = 32 CFG.batch = 64 # YOLO(CFG_SEG).train(**overrides) # works diff --git a/tests/test_python.py b/tests/test_python.py index 7e4ad9d3b1..1ac6dc457b 100644 --- a/tests/test_python.py +++ b/tests/test_python.py @@ -154,7 +154,7 @@ def test_predict_callback_and_setup(): bs = [predictor.dataset.bs for _ in range(len(path))] predictor.results = zip(predictor.results, im0s, bs) - model = YOLO('yolov8n.pt') + model = YOLO(MODEL) model.add_callback('on_predict_batch_end', on_predict_batch_end) dataset = load_inference_source(source=SOURCE, transforms=model.transforms) @@ -169,9 +169,16 @@ def test_predict_callback_and_setup(): def test_result(): model = YOLO('yolov8n-seg.pt') - img = str(ROOT / 'assets/bus.jpg') - res = model([img, img]) + res = model([SOURCE, SOURCE]) res[0].numpy() res[0].cpu().numpy() resimg = res[0].visualize(show_conf=False) print(resimg) + + model = YOLO('yolov8n.pt') + res = model(SOURCE) + res[0].visualize() + + model = YOLO('yolov8n-cls.pt') + res = model(SOURCE) + res[0].visualize() diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py index 03c5557967..89e490578e 100644 --- a/ultralytics/__init__.py +++ b/ultralytics/__init__.py @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license -__version__ = '8.0.40' +__version__ = '8.0.41' from ultralytics.yolo.engine.model import YOLO from ultralytics.yolo.utils.checks import check_yolo as checks diff --git a/ultralytics/yolo/data/datasets/Argoverse.yaml b/ultralytics/datasets/Argoverse.yaml similarity index 98% rename from ultralytics/yolo/data/datasets/Argoverse.yaml rename to ultralytics/datasets/Argoverse.yaml index 90e303ae90..0be17c8979 100644 --- a/ultralytics/yolo/data/datasets/Argoverse.yaml +++ b/ultralytics/datasets/Argoverse.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by 
Argo AI -# Example usage: python train.py --data Argoverse.yaml +# Example usage: yolo train data=Argoverse.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/yolo/data/datasets/GlobalWheat2020.yaml b/ultralytics/datasets/GlobalWheat2020.yaml similarity index 96% rename from ultralytics/yolo/data/datasets/GlobalWheat2020.yaml rename to ultralytics/datasets/GlobalWheat2020.yaml index 71f2cc4257..c41cb4f5f8 100644 --- a/ultralytics/yolo/data/datasets/GlobalWheat2020.yaml +++ b/ultralytics/datasets/GlobalWheat2020.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan -# Example usage: python train.py --data GlobalWheat2020.yaml +# Example usage: yolo train data=GlobalWheat2020.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/yolo/data/datasets/ImageNet.yaml b/ultralytics/datasets/ImageNet.yaml similarity index 99% rename from ultralytics/yolo/data/datasets/ImageNet.yaml rename to ultralytics/datasets/ImageNet.yaml index c42c0eb5ab..87cf6f166d 100644 --- a/ultralytics/yolo/data/datasets/ImageNet.yaml +++ b/ultralytics/datasets/ImageNet.yaml @@ -1,7 +1,7 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels -# Example usage: python classify/train.py --data imagenet +# Example usage: yolo train task=classify data=imagenet # parent # ├── yolov5 # └── datasets @@ -2022,4 +2022,4 @@ map: # Download script/URL (optional) -download: data/scripts/get_imagenet.sh +download: yolo/data/scripts/get_imagenet.sh diff --git a/ultralytics/yolo/data/datasets/Objects365.yaml b/ultralytics/datasets/Objects365.yaml similarity index 99% rename from ultralytics/yolo/data/datasets/Objects365.yaml rename to ultralytics/datasets/Objects365.yaml index 1c65125317..ad9e925a73 100644 --- a/ultralytics/yolo/data/datasets/Objects365.yaml +++ b/ultralytics/datasets/Objects365.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # Objects365 dataset https://www.objects365.org/ by Megvii -# Example usage: python train.py --data Objects365.yaml +# Example usage: yolo train data=Objects365.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/yolo/data/datasets/SKU-110K.yaml b/ultralytics/datasets/SKU-110K.yaml similarity index 97% rename from ultralytics/yolo/data/datasets/SKU-110K.yaml rename to ultralytics/datasets/SKU-110K.yaml index afa90b7949..6052177dd3 100644 --- a/ultralytics/yolo/data/datasets/SKU-110K.yaml +++ b/ultralytics/datasets/SKU-110K.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail -# Example usage: python train.py --data SKU-110K.yaml +# Example usage: yolo train data=SKU-110K.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/yolo/data/datasets/VOC.yaml b/ultralytics/datasets/VOC.yaml similarity index 98% rename from ultralytics/yolo/data/datasets/VOC.yaml rename to ultralytics/datasets/VOC.yaml index a3a3ba0377..8e5d68c21c 100644 --- a/ultralytics/yolo/data/datasets/VOC.yaml +++ b/ultralytics/datasets/VOC.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford -# Example usage: python train.py --data VOC.yaml +# Example usage: yolo train data=VOC.yaml # parent # ├── yolov5 # └── datasets diff --git 
a/ultralytics/yolo/data/datasets/VisDrone.yaml b/ultralytics/datasets/VisDrone.yaml similarity index 98% rename from ultralytics/yolo/data/datasets/VisDrone.yaml rename to ultralytics/datasets/VisDrone.yaml index 634860606d..141b568b0f 100644 --- a/ultralytics/yolo/data/datasets/VisDrone.yaml +++ b/ultralytics/datasets/VisDrone.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University -# Example usage: python train.py --data VisDrone.yaml +# Example usage: yolo train data=VisDrone.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/yolo/data/datasets/coco.yaml b/ultralytics/datasets/coco.yaml similarity index 98% rename from ultralytics/yolo/data/datasets/coco.yaml rename to ultralytics/datasets/coco.yaml index 650a742ab0..6c4bc20edd 100644 --- a/ultralytics/yolo/data/datasets/coco.yaml +++ b/ultralytics/datasets/coco.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # COCO 2017 dataset http://cocodataset.org by Microsoft -# Example usage: python train.py --data coco.yaml +# Example usage: yolo train data=coco.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/yolo/data/datasets/coco128-seg.yaml b/ultralytics/datasets/coco128-seg.yaml similarity index 97% rename from ultralytics/yolo/data/datasets/coco128-seg.yaml rename to ultralytics/datasets/coco128-seg.yaml index 91a8b96a4a..42403877cb 100644 --- a/ultralytics/yolo/data/datasets/coco128-seg.yaml +++ b/ultralytics/datasets/coco128-seg.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics -# Example usage: python train.py --data coco128.yaml +# Example usage: yolo train data=coco128.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/yolo/data/datasets/coco128.yaml b/ultralytics/datasets/coco128.yaml similarity index 97% rename from ultralytics/yolo/data/datasets/coco128.yaml rename to ultralytics/datasets/coco128.yaml index 60f582006f..0e02812a1e 100644 --- a/ultralytics/yolo/data/datasets/coco128.yaml +++ b/ultralytics/datasets/coco128.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics -# Example usage: python train.py --data coco128.yaml +# Example usage: yolo train data=coco128.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/yolo/data/datasets/coco8-seg.yaml b/ultralytics/datasets/coco8-seg.yaml similarity index 96% rename from ultralytics/yolo/data/datasets/coco8-seg.yaml rename to ultralytics/datasets/coco8-seg.yaml index 14c9fbd5b6..0dfa376f09 100644 --- a/ultralytics/yolo/data/datasets/coco8-seg.yaml +++ b/ultralytics/datasets/coco8-seg.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # COCO8-seg dataset (first 8 images from COCO train2017) by Ultralytics -# Example usage: python train.py --data coco8-seg.yaml +# Example usage: yolo train data=coco8-seg.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/yolo/data/datasets/coco8.yaml b/ultralytics/datasets/coco8.yaml similarity index 97% rename from ultralytics/yolo/data/datasets/coco8.yaml rename to ultralytics/datasets/coco8.yaml index f3920e8480..cb80516302 100644 --- a/ultralytics/yolo/data/datasets/coco8.yaml +++ b/ultralytics/datasets/coco8.yaml @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # COCO8 dataset (first 8 images from COCO 
train2017) by Ultralytics -# Example usage: python train.py --data coco8.yaml +# Example usage: yolo train data=coco8.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/yolo/data/datasets/xView.yaml b/ultralytics/datasets/xView.yaml similarity index 99% rename from ultralytics/yolo/data/datasets/xView.yaml rename to ultralytics/datasets/xView.yaml index 95d8aff1eb..e2ffca67e6 100644 --- a/ultralytics/yolo/data/datasets/xView.yaml +++ b/ultralytics/datasets/xView.yaml @@ -1,7 +1,7 @@ # Ultralytics YOLO 🚀, GPL-3.0 license # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA) # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! -------- -# Example usage: python train.py --data xView.yaml +# Example usage: yolo train data=xView.yaml # parent # ├── yolov5 # └── datasets diff --git a/ultralytics/hub/utils.py b/ultralytics/hub/utils.py index 7d60c7ba04..8a2bc49101 100644 --- a/ultralytics/hub/utils.py +++ b/ultralytics/hub/utils.py @@ -11,9 +11,9 @@ from random import random import requests -from ultralytics.yolo.utils import (DEFAULT_CFG_DICT, ENVIRONMENT, LOGGER, RANK, SETTINGS, TryExcept, __version__, - colorstr, emojis, get_git_origin_url, is_colab, is_git_dir, is_github_actions_ci, - is_pip_package, is_pytest_running) +from ultralytics.yolo.utils import (DEFAULT_CFG_DICT, ENVIRONMENT, LOGGER, RANK, SETTINGS, TESTS_RUNNING, TryExcept, + __version__, colorstr, emojis, get_git_origin_url, is_colab, is_git_dir, + is_pip_package) from ultralytics.yolo.utils.checks import check_online PREFIX = colorstr('Ultralytics: ') @@ -157,8 +157,7 @@ class Traces: self.enabled = SETTINGS['sync'] and \ RANK in {-1, 0} and \ check_online() and \ - not is_pytest_running() and \ - not is_github_actions_ci() and \ + not TESTS_RUNNING and \ (is_pip_package() or get_git_origin_url() == 'https://github.com/ultralytics/ultralytics.git') def __call__(self, cfg, all_keys=False, traces_sample_rate=1.0): diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py index 6165b8286b..5b91a96ca5 100644 --- a/ultralytics/nn/autobackend.py +++ b/ultralytics/nn/autobackend.py @@ -28,13 +28,18 @@ def check_class_names(names): if not all(isinstance(k, int) for k in names.keys()): # convert string keys to int, i.e. '0' to 0 names = {int(k): v for k, v in names.items()} if isinstance(names[0], str) and names[0].startswith('n0'): # imagenet class codes, i.e. 'n01440764' - map = yaml_load(ROOT / 'yolo/data/datasets/ImageNet.yaml')['map'] # human-readable names + map = yaml_load(ROOT / 'datasets/ImageNet.yaml')['map'] # human-readable names names = {k: map[v] for k, v in names.items()} return names class AutoBackend(nn.Module): + def _apply_default_class_names(self, data): + with contextlib.suppress(Exception): + return yaml_load(check_yaml(data))['names'] + return {i: f'class{i}' for i in range(999)} # return default if above errors + def __init__(self, weights='yolov8n.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True): """ MultiBackend class for python inference on various platforms using Ultralytics YOLO. 
@@ -53,7 +58,7 @@ class AutoBackend(nn.Module): | PyTorch | *.pt | | TorchScript | *.torchscript | | ONNX Runtime | *.onnx | - | ONNX OpenCV DNN | *.onnx --dnn | + | ONNX OpenCV DNN | *.onnx dnn=True | | OpenVINO | *.xml | | CoreML | *.mlmodel | | TensorRT | *.engine | @@ -142,13 +147,9 @@ class AutoBackend(nn.Module): logger = trt.Logger(trt.Logger.INFO) # Read file with open(w, 'rb') as f, trt.Runtime(logger) as runtime: - # Read metadata length - meta_len = int.from_bytes(f.read(4), byteorder='little') - # Read metadata - meta = json.loads(f.read(meta_len).decode('utf-8')) - stride, names = int(meta['stride']), meta['names'] - # Read engine - model = runtime.deserialize_cuda_engine(f.read()) + meta_len = int.from_bytes(f.read(4), byteorder='little') # read metadata length + meta = json.loads(f.read(meta_len).decode('utf-8')) # read metadata + model = runtime.deserialize_cuda_engine(f.read()) # read engine context = model.create_execution_context() bindings = OrderedDict() output_names = [] @@ -170,6 +171,7 @@ class AutoBackend(nn.Module): bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size + stride, names = int(meta['stride']), meta['names'] elif coreml: # CoreML LOGGER.info(f'Loading {w} for CoreML inference...') import coremltools as ct @@ -179,6 +181,7 @@ class AutoBackend(nn.Module): import tensorflow as tf keras = False # assume TF1 saved_model model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) + w = Path(w) / 'metadata.yaml' elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') import tensorflow as tf @@ -265,7 +268,7 @@ class AutoBackend(nn.Module): # Check names if 'names' not in locals(): # names missing - names = yaml_load(check_yaml(data))['names'] if data else {i: f'class{i}' for i in range(999)} # assign + names = self._apply_default_class_names(data) names = check_class_names(names) self.__dict__.update(locals()) # assign all variables to self @@ -324,7 +327,9 @@ class AutoBackend(nn.Module): box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) - else: + elif len(y) == 1: # classification model + y = list(y.values()) + elif len(y) == 2: # segmentation model y = list(reversed(y.values())) # reversed for segmentation models (pred, proto) elif self.paddle: # PaddlePaddle im = im.cpu().numpy().astype(np.float32) @@ -337,8 +342,14 @@ class AutoBackend(nn.Module): im = im.cpu().numpy() if self.saved_model: # SavedModel y = self.model(im, training=False) if self.keras else self.model(im) + if not isinstance(y, list): + y = [y] elif self.pb: # GraphDef y = self.frozen_func(x=self.tf.constant(im)) + if len(y) == 2 and len(self.names) == 999: # segments and names not defined + ip, ib = (0, 1) if len(y[0].shape) == 4 else (1, 0) # index of protos, boxes + nc = y[ib].shape[1] - y[ip].shape[3] - 4 # y = (1, 160, 160, 32), (1, 116, 8400) + self.names = {i: f'class{i}' for i in range(nc)} else: # Lite or Edge TPU input = self.input_details[0] int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model @@ -354,12 +365,16 @@ class AutoBackend(nn.Module): scale, zero_point = output['quantization'] x = (x.astype(np.float32) - zero_point) * 
scale # re-scale y.append(x) - # TF segment fixes: export is reversed vs ONNX export and protos are transposed - if len(self.output_details) == 2: # segment - y = [y[1], np.transpose(y[0], (0, 3, 1, 2))] + # TF segment fixes: export is reversed vs ONNX export and protos are transposed + if len(y) == 2: # segment with (det, proto) output order reversed + if len(y[1].shape) != 4: + y = list(reversed(y)) # should be y = (1, 116, 8400), (1, 160, 160, 32) + y[1] = np.transpose(y[1], (0, 3, 1, 2)) # should be y = (1, 116, 8400), (1, 32, 160, 160) y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] # y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels + # for x in y: + # print(type(x), len(x)) if isinstance(x, (list, tuple)) else print(type(x), x.shape) # debug shapes if isinstance(y, (list, tuple)): return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] else: @@ -375,7 +390,7 @@ class AutoBackend(nn.Module): Returns: (torch.Tensor): The converted tensor """ - return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x + return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x def warmup(self, imgsz=(1, 3, 640, 640)): """ diff --git a/ultralytics/nn/autoshape.py b/ultralytics/nn/autoshape.py index f3eb9562f9..654a688b57 100644 --- a/ultralytics/nn/autoshape.py +++ b/ultralytics/nn/autoshape.py @@ -79,7 +79,7 @@ class AutoShape(nn.Module): with amp.autocast(autocast): return self.model(ims.to(p.device).type_as(p), augment=augment) # inference - # Pre-process + # Preprocess n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(ims): @@ -108,7 +108,7 @@ class AutoShape(nn.Module): with dt[1]: y = self.model(x, augment=augment) # forward - # Post-process + # Postprocess with dt[2]: y = non_max_suppression(y if self.dmb else y[0], self.conf, @@ -181,7 +181,7 @@ class Detections: self.ims[i] = np.asarray(im) if pprint: s = s.lstrip('\n') - return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t + return f'{s}\nSpeed: %.1fms preprocess, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t if crop: if save: LOGGER.info(f'Saved results to {save_dir}\n') diff --git a/ultralytics/nn/modules.py b/ultralytics/nn/modules.py index 2bbf338bb8..7494028511 100644 --- a/ultralytics/nn/modules.py +++ b/ultralytics/nn/modules.py @@ -174,7 +174,7 @@ class C2(nn.Module): self.m = nn.Sequential(*(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))) def forward(self, x): - a, b = self.cv1(x).split((self.c, self.c), 1) + a, b = self.cv1(x).chunk(2, 1) return self.cv2(torch.cat((self.m(a), b), 1)) @@ -188,6 +188,11 @@ class C2f(nn.Module): self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)) def forward(self, x): + y = list(self.cv1(x).chunk(2, 1)) + y.extend(m(y[-1]) for m in self.m) + return self.cv2(torch.cat(y, 1)) + + def forward_split(self, x): y = list(self.cv1(x).split((self.c, self.c), 1)) y.extend(m(y[-1]) for m in self.m) return self.cv2(torch.cat(y, 1)) @@ -405,7 +410,12 @@ class Detect(nn.Module): self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5)) self.shape = shape - box, cls = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2).split((self.reg_max * 4, self.nc), 1) + if self.export 
and self.format == 'edgetpu': # FlexSplitV ops issue + x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2) + box = x_cat[:, :self.reg_max * 4] + cls = x_cat[:, self.reg_max * 4:] + else: + box, cls = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2).split((self.reg_max * 4, self.nc), 1) dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides y = torch.cat((dbox, cls.sigmoid()), 1) return y if self.export else (y, x) diff --git a/ultralytics/nn/tasks.py b/ultralytics/nn/tasks.py index e3702d081d..6a663f0865 100644 --- a/ultralytics/nn/tasks.py +++ b/ultralytics/nn/tasks.py @@ -1,6 +1,5 @@ # Ultralytics YOLO 🚀, GPL-3.0 license -import ast import contextlib from copy import deepcopy from pathlib import Path @@ -338,7 +337,7 @@ def torch_safe_load(weight): file = attempt_download_asset(weight) # search online if missing locally try: - return torch.load(file, map_location='cpu') # load + return torch.load(file, map_location='cpu'), file # load except ModuleNotFoundError as e: if e.name == 'omegaconf': # e.name is missing module name LOGGER.warning(f'WARNING ⚠️ {weight} requires {e.name}, which is not in ultralytics requirements.' @@ -347,7 +346,7 @@ def torch_safe_load(weight): f'download updated models from https://github.com/ultralytics/assets/releases/tag/v0.0.0') if e.name != 'models': check_requirements(e.name) # install missing module - return torch.load(file, map_location='cpu') # load + return torch.load(file, map_location='cpu'), file # load def attempt_load_weights(weights, device=None, inplace=True, fuse=False): @@ -355,13 +354,13 @@ def attempt_load_weights(weights, device=None, inplace=True, fuse=False): ensemble = Ensemble() for w in weights if isinstance(weights, list) else [weights]: - ckpt = torch_safe_load(w) # load ckpt + ckpt, w = torch_safe_load(w) # load ckpt args = {**DEFAULT_CFG_DICT, **ckpt['train_args']} # combine model and default args, preferring model args model = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model # Model compatibility updates model.args = args # attach args to model - model.pt_path = weights # attach *.pt file path to model + model.pt_path = w # attach *.pt file path to model model.task = guess_model_task(model) if not hasattr(model, 'stride'): model.stride = torch.tensor([32.]) @@ -392,7 +391,7 @@ def attempt_load_weights(weights, device=None, inplace=True, fuse=False): def attempt_load_one_weight(weight, device=None, inplace=True, fuse=False): # Loads a single model weights - ckpt = torch_safe_load(weight) # load ckpt + ckpt, weight = torch_safe_load(weight) # load ckpt args = {**DEFAULT_CFG_DICT, **ckpt['train_args']} # combine model and default args, preferring model args model = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model diff --git a/ultralytics/tracker/README.md b/ultralytics/tracker/README.md index 710a6b7f2e..fcf5c64c9e 100644 --- a/ultralytics/tracker/README.md +++ b/ultralytics/tracker/README.md @@ -29,4 +29,5 @@ yolo segment track source=... tracker=... ``` By default, trackers will use the configuration in `ultralytics/tracker/cfg`. -We also support using a modified tracker config file. Please refer to the tracker config files in `ultralytics/tracker/cfg`. +We also support using a modified tracker config file. Please refer to the tracker config files +in `ultralytics/tracker/cfg`. 
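Aside: a short sketch of the custom-tracker override described in the tracker README above, using the Python API. The CLI form `yolo detect track source=... tracker=...` is the documented path; `model.track` is referenced in docs/callbacks.md earlier in this diff, but its exact signature here is an assumption:

```python
# Sketch: tracking with an explicit tracker config, mirroring the CLI usage
# above (model.track() and its tracker= keyword are assumed, not confirmed).
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
# 'bytetrack.yaml' and 'botsort.yaml' ship in ultralytics/tracker/cfg; a
# modified copy can be passed by path. on_predict_start() in track.py below
# asserts that tracker_type is 'bytetrack' or 'botsort'.
results = model.track(source='https://ultralytics.com/images/bus.jpg', tracker='bytetrack.yaml')
```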
diff --git a/ultralytics/tracker/track.py b/ultralytics/tracker/track.py
index cbce60f4d0..be16e6ba01 100644
--- a/ultralytics/tracker/track.py
+++ b/ultralytics/tracker/track.py
@@ -12,7 +12,7 @@ def on_predict_start(predictor):
     tracker = check_yaml(predictor.args.tracker)
     cfg = IterableSimpleNamespace(**yaml_load(tracker))
     assert cfg.tracker_type in ['bytetrack', 'botsort'], \
-            f"Only support 'bytetrack' and 'botsort' for now, but got '{cfg.tracker_type}'"
+        f"Only support 'bytetrack' and 'botsort' for now, but got '{cfg.tracker_type}'"
     trackers = []
     for _ in range(predictor.dataset.bs):
         tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
diff --git a/ultralytics/yolo/cfg/__init__.py b/ultralytics/yolo/cfg/__init__.py
index ed03dc9a58..2004966190 100644
--- a/ultralytics/yolo/cfg/__init__.py
+++ b/ultralytics/yolo/cfg/__init__.py
@@ -256,7 +256,7 @@ def entrypoint(debug=''):
 
     # Defaults
     task2model = dict(detect='yolov8n.pt', segment='yolov8n-seg.pt', classify='yolov8n-cls.pt')
-    task2data = dict(detect='coco128.yaml', segment='coco128-seg.yaml', classify='mnist160')
+    task2data = dict(detect='coco128.yaml', segment='coco128-seg.yaml', classify='imagenet100')
 
     # Mode
     mode = overrides.get('mode', None)
diff --git a/ultralytics/yolo/data/augment.py b/ultralytics/yolo/data/augment.py
index 1dedd3d38e..6f3179911c 100644
--- a/ultralytics/yolo/data/augment.py
+++ b/ultralytics/yolo/data/augment.py
@@ -583,9 +583,10 @@ class Albumentations:
         # TODO: add supports of segments and keypoints
         if self.transform and random.random() < self.p:
             new = self.transform(image=im, bboxes=bboxes, class_labels=cls)  # transformed
-            labels['img'] = new['image']
-            labels['cls'] = np.array(new['class_labels'])
-            bboxes = np.array(new['bboxes'])
+            if len(new['class_labels']) > 0:  # skip update if no bbox in new im
+                labels['img'] = new['image']
+                labels['cls'] = np.array(new['class_labels'])
+                bboxes = np.array(new['bboxes'])
         labels['instances'].update(bboxes=bboxes)
         return labels
 
diff --git a/ultralytics/yolo/engine/exporter.py b/ultralytics/yolo/engine/exporter.py
index 93e40d4905..cbe0046309 100644
--- a/ultralytics/yolo/engine/exporter.py
+++ b/ultralytics/yolo/engine/exporter.py
@@ -18,29 +18,28 @@ TensorFlow.js                | `tfjs`                    | yolov8n_web_model/
 PaddlePaddle                 | `paddle`                  | yolov8n_paddle_model/
 
 Requirements:
-    $ pip install -r requirements.txt coremltools onnx onnxsim onnxruntime openvino-dev tensorflow-cpu  # CPU
-    $ pip install -r requirements.txt coremltools onnx onnxsim onnxruntime-gpu openvino-dev tensorflow  # GPU
+    $ pip install ultralytics[export]
 
 Python:
     from ultralytics import YOLO
-    model = YOLO('yolov8n.yaml')
+    model = YOLO('yolov8n.pt')
     results = model.export(format='onnx')
 
 CLI:
     $ yolo mode=export model=yolov8n.pt format=onnx
 
 Inference:
-    $ python detect.py --weights yolov8n.pt          # PyTorch
-                                 yolov8n.torchscript # TorchScript
-                                 yolov8n.onnx        # ONNX Runtime or OpenCV DNN with --dnn
-                                 yolov8n_openvino_model  # OpenVINO
-                                 yolov8n.engine      # TensorRT
-                                 yolov8n.mlmodel     # CoreML (macOS-only)
-                                 yolov8n_saved_model # TensorFlow SavedModel
-                                 yolov8n.pb          # TensorFlow GraphDef
-                                 yolov8n.tflite      # TensorFlow Lite
-                                 yolov8n_edgetpu.tflite  # TensorFlow Edge TPU
-                                 yolov8n_paddle_model    # PaddlePaddle
+    $ yolo predict model=yolov8n.pt          # PyTorch
+                         yolov8n.torchscript # TorchScript
+                         yolov8n.onnx        # ONNX Runtime or OpenCV DNN with dnn=True
+                         yolov8n_openvino_model  # OpenVINO
+                         yolov8n.engine      # TensorRT
+                         yolov8n.mlmodel     # CoreML (macOS-only)
+                         yolov8n_saved_model # TensorFlow SavedModel
+                         yolov8n.pb          # TensorFlow GraphDef
+
yolov8n.tflite # TensorFlow Lite + yolov8n_edgetpu.tflite # TensorFlow Edge TPU + yolov8n_paddle_model # PaddlePaddle TensorFlow.js: $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example @@ -64,12 +63,12 @@ import pandas as pd import torch from ultralytics.nn.autobackend import check_class_names -from ultralytics.nn.modules import Detect, Segment +from ultralytics.nn.modules import C2f, Detect, Segment from ultralytics.nn.tasks import DetectionModel, SegmentationModel from ultralytics.yolo.cfg import get_cfg from ultralytics.yolo.data.dataloaders.stream_loaders import LoadImages from ultralytics.yolo.data.utils import IMAGENET_MEAN, IMAGENET_STD, check_det_dataset -from ultralytics.yolo.utils import (DEFAULT_CFG, LINUX, LOGGER, MACOS, WINDOWS, __version__, callbacks, colorstr, +from ultralytics.yolo.utils import (DEFAULT_CFG, LINUX, LOGGER, MACOS, __version__, callbacks, colorstr, get_default_args, yaml_save) from ultralytics.yolo.utils.checks import check_imgsz, check_requirements, check_version, check_yaml from ultralytics.yolo.utils.files import file_size @@ -77,6 +76,7 @@ from ultralytics.yolo.utils.ops import Profile from ultralytics.yolo.utils.torch_utils import get_latest_opset, select_device, smart_inference_mode CUDA = torch.cuda.is_available() +ARM64 = platform.machine() in ('arm64', 'aarch64') def export_formats(): @@ -157,11 +157,10 @@ class Exporter: # Load PyTorch model self.device = select_device('cpu' if self.args.device is None else self.args.device) - if self.args.half: - if self.device.type == 'cpu' and not coreml and not xml: - LOGGER.info('half=True only compatible with GPU or CoreML export, i.e. use device=0 or format=coreml') - self.args.half = False - assert not self.args.dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic' + if self.args.half and onnx and self.device.type == 'cpu': + LOGGER.warning('WARNING ⚠️ half=True only compatible with GPU export, i.e. use device=0') + self.args.half = False + assert not self.args.dynamic, 'half=True not compatible with dynamic=True, i.e. use only one.' # Checks model.names = check_class_names(model.names) @@ -188,11 +187,15 @@ class Exporter: if isinstance(m, (Detect, Segment)): m.dynamic = self.args.dynamic m.export = True + m.format = self.args.format + elif isinstance(m, C2f) and not edgetpu: + # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph + m.forward = m.forward_split y = None for _ in range(2): y = model(im) # dry runs - if self.args.half and not coreml and not xml: + if self.args.half and (engine or onnx) and self.device.type != 'cpu': im, model = im.half(), model.half() # to FP16 # Warnings @@ -207,7 +210,7 @@ class Exporter: self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else tuple(tuple(x.shape) for x in y) self.pretty_name = self.file.stem.replace('yolo', 'YOLO') self.metadata = { - 'description': f'Ultralytics {self.pretty_name} model trained on {self.args.data}', + 'description': f'Ultralytics {self.pretty_name} model trained on {Path(self.args.data).name}', 'author': 'Ultralytics', 'license': 'GPL-3.0 https://ultralytics.com/license', 'version': __version__, @@ -233,19 +236,16 @@ class Exporter: LOGGER.warning('WARNING ⚠️ YOLOv8 TensorFlow export is still under development. ' 'Please consider contributing to the effort if you have TF expertise. 
Thank you!') nms = False + self.args.int8 |= edgetpu f[5], s_model = self._export_saved_model(nms=nms or self.args.agnostic_nms or tfjs, agnostic_nms=self.args.agnostic_nms or tfjs) if pb or tfjs: # pb prerequisite to tfjs f[6], _ = self._export_pb(s_model) - if tflite or edgetpu: - f[7] = str(Path(f[5]) / (self.file.stem + '_float16.tflite')) - # f[7], _ = self._export_tflite(s_model, - # int8=self.args.int8 or edgetpu, - # data=self.args.data, - # nms=nms, - # agnostic_nms=self.args.agnostic_nms) + if tflite: + f[7], _ = self._export_tflite(s_model, nms=nms, agnostic_nms=self.args.agnostic_nms) if edgetpu: - f[8], _ = self._export_edgetpu(tflite_model=f[7]) + f[8], _ = self._export_edgetpu(tflite_model=str( + Path(f[5]) / (self.file.stem + '_full_integer_quant.tflite'))) # int8 in/out if tfjs: f[9], _ = self._export_tfjs() if paddle: # PaddlePaddle @@ -263,8 +263,8 @@ class Exporter: LOGGER.info( f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f'\nPredict: yolo task={model.task} mode=predict model={f} imgsz={imgsz} {data}' - f'\nValidate: yolo task={model.task} mode=val model={f} imgsz={imgsz} data={self.args.data} {s}' + f'\nPredict: yolo predict task={model.task} model={f} imgsz={imgsz} {data}' + f'\nValidate: yolo val task={model.task} model={f} imgsz={imgsz} data={self.args.data} {s}' f'\nVisualize: https://netron.app') self.run_callbacks('on_export_end') @@ -319,25 +319,27 @@ class Exporter: # Checks model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - - # Metadata - d = {'stride': int(max(self.model.stride)), 'names': self.model.names} - for k, v in d.items(): - meta = model_onnx.metadata_props.add() - meta.key, meta.value = k, str(v) - onnx.save(model_onnx, f) + # onnx.checker.check_model(model_onnx) # check onnx model # Simplify if self.args.simplify: try: - check_requirements('onnxsim') + check_requirements(('onnxsim', 'onnxruntime-gpu' if CUDA else 'onnxruntime')) import onnxsim LOGGER.info(f'{prefix} simplifying with onnxsim {onnxsim.__version__}...') - subprocess.run(f'onnxsim {f} {f}', shell=True) + # subprocess.run(f'onnxsim {f} {f}', shell=True) + model_onnx, check = onnxsim.simplify(model_onnx) + assert check, 'Simplified ONNX model could not be validated' except Exception as e: LOGGER.info(f'{prefix} simplifier failure: {e}') + + # Metadata + for k, v in self.metadata.items(): + meta = model_onnx.metadata_props.add() + meta.key, meta.value = k, str(v) + + onnx.save(model_onnx, f) return f, model_onnx @try_export @@ -402,7 +404,7 @@ class Exporter: if self.model.task == 'classify': bias = [-x for x in IMAGENET_MEAN] scale = 1 / 255 / (sum(IMAGENET_STD) / 3) - classifier_config = ct.ClassifierConfig(list(self.model.names.values())) + classifier_config = ct.ClassifierConfig(list(self.model.names.values())) if self.args.nms else None else: bias = [0.0, 0.0, 0.0] scale = 1 / 255 @@ -414,10 +416,7 @@ class Exporter: classifier_config=classifier_config) bits, mode = (8, 'kmeans_lut') if self.args.int8 else (16, 'linear') if self.args.half else (32, None) if bits < 32: - if MACOS: # quantization only supported on macOS - ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) - else: - LOGGER.info(f'{prefix} quantization only supported on macOS, skipping...') + ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) if self.args.nms: ct_model = self._pipeline_coreml(ct_model) @@ -440,11 
+439,11 @@ class Exporter: import tensorrt as trt # noqa check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 - self._export_onnx() - onnx = self.file.with_suffix('.onnx') + self.args.simplify = True + f_onnx, _ = self._export_onnx() LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' + assert Path(f_onnx).exists(), f'failed to export ONNX file: {f_onnx}' f = self.file.with_suffix('.engine') # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) if verbose: @@ -458,8 +457,8 @@ class Exporter: flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) network = builder.create_network(flag) parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') + if not parser.parse_from_file(f_onnx): + raise RuntimeError(f'failed to load ONNX file: {f_onnx}') inputs = [network.get_input(i) for i in range(network.num_inputs)] outputs = [network.get_output(i) for i in range(network.num_outputs)] @@ -507,77 +506,37 @@ class Exporter: try: import tensorflow as tf # noqa except ImportError: - check_requirements(f"tensorflow{'' if CUDA else '-macos' if MACOS else '-cpu' if LINUX else ''}") + check_requirements(f"tensorflow{'-macos' if MACOS else '-aarch64' if ARM64 else '' if CUDA else '-cpu'}") import tensorflow as tf # noqa - check_requirements(('onnx', 'onnx2tf', 'sng4onnx', 'onnxsim', 'onnx_graphsurgeon', 'tflite_support'), + check_requirements(('onnx', 'onnx2tf', 'sng4onnx', 'onnxsim', 'onnx_graphsurgeon', 'tflite_support', + 'onnxruntime-gpu' if CUDA else 'onnxruntime'), cmds='--extra-index-url https://pypi.ngc.nvidia.com') LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = str(self.file).replace(self.file.suffix, '_saved_model') + f = Path(str(self.file).replace(self.file.suffix, '_saved_model')) + if f.is_dir(): + import shutil + shutil.rmtree(f) # delete output folder # Export to ONNX - self._export_onnx() - onnx = self.file.with_suffix('.onnx') + self.args.simplify = True + f_onnx, _ = self._export_onnx() - # Export to TF SavedModel - subprocess.run(f'onnx2tf -i {onnx} -o {f} --non_verbose', shell=True) - yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml + # Export to TF + int8 = '-oiqt -qt per-tensor' if self.args.int8 else '' + cmd = f'onnx2tf -i {f_onnx} -o {f} --non_verbose {int8}' + LOGGER.info(f'\n{prefix} running {cmd}') + subprocess.run(cmd, shell=True) + yaml_save(f / 'metadata.yaml', self.metadata) # add metadata.yaml # Add TFLite metadata - for file in Path(f).rglob('*.tflite'): + for file in f.rglob('*.tflite'): self._add_tflite_metadata(file) # Load saved_model keras_model = tf.saved_model.load(f, tags=None, options=None) - return f, keras_model - - @try_export - def _export_saved_model_OLD(self, - nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25, - prefix=colorstr('TensorFlow SavedModel:')): - # YOLOv8 TensorFlow SavedModel export - try: - import tensorflow as tf # noqa - except ImportError: - check_requirements(f"tensorflow{'' if CUDA else '-macos' if MACOS else '-cpu' if LINUX else ''}") - import tensorflow as tf # noqa - # from models.tf import TFModel - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 # noqa - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = 
str(self.file).replace(self.file.suffix, '_saved_model') - batch_size, ch, *imgsz = list(self.im.shape) # BCHW - - tf_models = None # TODO: no TF modules available - tf_model = tf_models.TFModel(cfg=self.model.yaml, model=self.model.cpu(), nc=self.model.nc, imgsz=imgsz) - im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow - _ = tf_model.predict(im, nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if self.args.dynamic else batch_size) - outputs = tf_model.predict(inputs, nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) - keras_model.trainable = False - keras_model.summary() - if self.args.keras: - keras_model.save(f, save_format='tf') - else: - spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(spec) - frozen_func = convert_variables_to_constants_v2(m) - tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if nms else frozen_func(x), [spec]) - tfm.__call__(im) - tf.saved_model.save(tfm, - f, - options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) - if check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions()) - return f, keras_model + return str(f), keras_model @try_export def _export_pb(self, keras_model, prefix=colorstr('TensorFlow GraphDef:')): @@ -596,8 +555,18 @@ class Exporter: return f, None @try_export - def _export_tflite(self, keras_model, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): + def _export_tflite(self, keras_model, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): # YOLOv8 TensorFlow Lite export + saved_model = Path(str(self.file).replace(self.file.suffix, '_saved_model')) + if self.args.int8: + f = saved_model / (self.file.stem + '_integer_quant.tflite') # fp32 in/out + elif self.args.half: + f = saved_model / (self.file.stem + '_float16.tflite') + else: + f = saved_model / (self.file.stem + '_float32.tflite') + return str(f), None # noqa + + # OLD VERSION BELOW --------------------------------------------------------------- import tensorflow as tf # noqa LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') @@ -608,7 +577,7 @@ class Exporter: converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] converter.target_spec.supported_types = [tf.float16] converter.optimizations = [tf.lite.Optimize.DEFAULT] - if int8: + if self.args.int8: def representative_dataset_gen(dataset, n_images=100): # Dataset generator for use with converter.representative_dataset, returns a generator of np arrays @@ -620,7 +589,7 @@ class Exporter: if n >= n_images: break - dataset = LoadImages(check_det_dataset(check_yaml(data))['train'], imgsz=imgsz, auto=False) + dataset = LoadImages(check_det_dataset(check_yaml(self.args.data))['train'], imgsz=imgsz, auto=False) converter.representative_dataset = lambda: representative_dataset_gen(dataset, n_images=100) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.target_spec.supported_types = [] @@ -641,7 +610,7 @@ class Exporter: cmd = 'edgetpu_compiler --version' help_url = 'https://coral.ai/docs/edgetpu/compiler/' assert LINUX, f'export only supported on Linux. 
See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: + if subprocess.run(f'{cmd} > /dev/null', shell=True).returncode != 0: LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system for c in ( @@ -656,7 +625,7 @@ class Exporter: LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') f = str(tflite_model).replace('.tflite', '_edgetpu.tflite') # Edge TPU model - cmd = f'edgetpu_compiler -s -d -k 10 --out_dir {self.file.parent} {tflite_model}' + cmd = f'edgetpu_compiler -s -d -k 10 --out_dir {Path(f).parent} {tflite_model}' subprocess.run(cmd.split(), check=True) self._add_tflite_metadata(f) return f, None @@ -674,7 +643,7 @@ class Exporter: cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) + subprocess.run(cmd.split(), check=True) with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order subst = re.sub( @@ -698,14 +667,23 @@ class Exporter: from tflite_support import metadata as _metadata # noqa from tflite_support import metadata_schema_py_generated as _metadata_fb # noqa - # Creates model info. + # Create model info model_meta = _metadata_fb.ModelMetadataT() model_meta.name = self.metadata['description'] model_meta.version = self.metadata['version'] model_meta.author = self.metadata['author'] model_meta.license = self.metadata['license'] - # Creates input info. + # Label file + tmp_file = file.parent / 'temp_meta.txt' + with open(tmp_file, 'w') as f: + f.write(str(self.metadata)) + + label_file = _metadata_fb.AssociatedFileT() + label_file.name = tmp_file.name + label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS + + # Create input info input_meta = _metadata_fb.TensorMetadataT() input_meta.name = 'image' input_meta.description = 'Input image to be detected.' @@ -714,25 +692,21 @@ class Exporter: input_meta.content.contentProperties.colorSpace = _metadata_fb.ColorSpaceType.RGB input_meta.content.contentPropertiesType = _metadata_fb.ContentProperties.ImageProperties - # Creates output info. - output_meta = _metadata_fb.TensorMetadataT() - output_meta.name = 'output' - output_meta.description = 'Coordinates of detected objects, class labels, and confidence score.' - - # Label file - tmp_file = Path('/tmp/meta.txt') - with open(tmp_file, 'w') as meta_f: - meta_f.write(str(self.metadata)) - - label_file = _metadata_fb.AssociatedFileT() - label_file.name = tmp_file.name - label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS - output_meta.associatedFiles = [label_file] - - # Creates subgraph info. 
+ # Create output info + output1 = _metadata_fb.TensorMetadataT() + output1.name = 'output' + output1.description = 'Coordinates of detected objects, class labels, and confidence score' + output1.associatedFiles = [label_file] + if self.model.task == 'segment': + output2 = _metadata_fb.TensorMetadataT() + output2.name = 'output' + output2.description = 'Mask protos' + output2.associatedFiles = [label_file] + + # Create subgraph info subgraph = _metadata_fb.SubGraphMetadataT() subgraph.inputTensorMetadata = [input_meta] - subgraph.outputTensorMetadata = [output_meta] + subgraph.outputTensorMetadata = [output1, output2] if self.model.task == 'segment' else [output1] model_meta.subgraphMetadata = [subgraph] b = flatbuffers.Builder(0) diff --git a/ultralytics/yolo/engine/model.py b/ultralytics/yolo/engine/model.py index 32e72e52b0..cb92c91711 100644 --- a/ultralytics/yolo/engine/model.py +++ b/ultralytics/yolo/engine/model.py @@ -29,14 +29,45 @@ MODEL_MAP = { class YOLO: """ - YOLO + YOLO (You Only Look Once) object detection model. - A python interface which emulates a model-like behaviour by wrapping trainers. - """ + Args: + model (str or Path): Path to the model file to load or create. + type (str): Type/version of models to use. Defaults to "v8". + + Attributes: + type (str): Type/version of models being used. + ModelClass (Any): Model class. + TrainerClass (Any): Trainer class. + ValidatorClass (Any): Validator class. + PredictorClass (Any): Predictor class. + predictor (Any): Predictor object. + model (Any): Model object. + trainer (Any): Trainer object. + task (str): Type of model task. + ckpt (Any): Checkpoint object if model loaded from *.pt file. + cfg (str): Model configuration if loaded from *.yaml file. + ckpt_path (str): Checkpoint file path. + overrides (dict): Overrides for trainer object. + metrics_data (Any): Data for metrics. + + Methods: + __call__(): Alias for predict method. + _new(cfg, verbose=True): Initializes a new model and infers the task type from the model definitions. + _load(weights): Initializes a new model and infers the task type from the model head. + _check_is_pytorch_model(): Raises TypeError if model is not a PyTorch model. + reset(): Resets the model modules. + info(verbose=False): Logs model info. + fuse(): Fuse model for faster inference. + predict(source=None, stream=False, **kwargs): Perform prediction using the YOLO model. + + Returns: + List[ultralytics.yolo.engine.results.Results]: The prediction results. + """ def __init__(self, model='yolov8n.pt', type='v8') -> None: """ - Initializes the YOLO object. + Initializes the YOLO model. Args: model (str, Path): model to load or create @@ -97,11 +128,12 @@ class YOLO: self.task = self.model.args['task'] self.overrides = self.model.args self._reset_ckpt_args(self.overrides) + self.ckpt_path = self.model.pt_path else: - check_file(weights) + weights = check_file(weights) self.model, self.ckpt = weights, None self.task = guess_model_task(weights) - self.ckpt_path = weights + self.ckpt_path = weights self.overrides['model'] = weights self.ModelClass, self.TrainerClass, self.ValidatorClass, self.PredictorClass = self._assign_ops_from_task() @@ -204,7 +236,6 @@ class YOLO: return validator.metrics - @smart_inference_mode() def export(self, **kwargs): """ Export model. 
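The expanded `YOLO` docstring above effectively defines the public Python API that the rest of this patch builds on. A minimal usage sketch of the documented flow (a sketch only: `bus.jpg` is a placeholder source, and `yolov8n.pt` is assumed to be present or auto-downloadable):

from ultralytics import YOLO

model = YOLO('yolov8n.pt')               # _load() infers the task and records ckpt_path
results = model.predict('bus.jpg')       # __call__ is an alias for predict()
print(model.names)                       # class-name mapping exposed as a property
onnx_file = model.export(format='onnx')  # export() returns the exported file path, as benchmarks.py below relies on
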
@@ -279,6 +310,13 @@ class YOLO: """ return self.model.names if hasattr(self.model, 'names') else None + @property + def device(self): + """ + Returns device if PyTorch model + """ + return next(self.model.parameters()).device if isinstance(self.model, nn.Module) else None + @property def transforms(self): """ @@ -293,7 +331,6 @@ class YOLO: """ if not self.metrics_data: LOGGER.info('No metrics data found! Run training or validation operation first.') - return self.metrics_data @staticmethod @@ -306,7 +343,7 @@ class YOLO: @staticmethod def _reset_ckpt_args(args): for arg in 'augment', 'verbose', 'project', 'name', 'exist_ok', 'resume', 'batch', 'epochs', 'cache', \ - 'save_json', 'half', 'v5loader', 'device', 'cfg', 'save', 'rect', 'plots', 'opset': + 'save_json', 'half', 'v5loader', 'device', 'cfg', 'save', 'rect', 'plots', 'opset', 'simplify': args.pop(arg, None) @staticmethod diff --git a/ultralytics/yolo/engine/predictor.py b/ultralytics/yolo/engine/predictor.py index 6151122672..9f4857d635 100644 --- a/ultralytics/yolo/engine/predictor.py +++ b/ultralytics/yolo/engine/predictor.py @@ -1,30 +1,32 @@ # Ultralytics YOLO 🚀, GPL-3.0 license """ Run prediction on images, videos, directories, globs, YouTube, webcam, streams, etc. + Usage - sources: - $ yolo task=... mode=predict model=s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - screen # screenshot - path/ # directory - list.txt # list of images - list.streams # list of streams - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + $ yolo mode=predict model=yolov8n.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + Usage - formats: - $ yolo task=... 
mode=predict --weights yolov8n.pt # PyTorch - yolov8n.torchscript # TorchScript - yolov8n.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov8n_openvino_model # OpenVINO - yolov8n.engine # TensorRT - yolov8n.mlmodel # CoreML (macOS-only) - yolov8n_saved_model # TensorFlow SavedModel - yolov8n.pb # TensorFlow GraphDef - yolov8n.tflite # TensorFlow Lite - yolov8n_edgetpu.tflite # TensorFlow Edge TPU - yolov8n_paddle_model # PaddlePaddle - """ + $ yolo mode=predict model=yolov8n.pt # PyTorch + yolov8n.torchscript # TorchScript + yolov8n.onnx # ONNX Runtime or OpenCV DNN with dnn=True + yolov8n_openvino_model # OpenVINO + yolov8n.engine # TensorRT + yolov8n.mlmodel # CoreML (macOS-only) + yolov8n_saved_model # TensorFlow SavedModel + yolov8n.pb # TensorFlow GraphDef + yolov8n.tflite # TensorFlow Lite + yolov8n_edgetpu.tflite # TensorFlow Edge TPU + yolov8n_paddle_model # PaddlePaddle +""" import platform from collections import defaultdict from pathlib import Path @@ -200,9 +202,9 @@ class BasePredictor: # Print results if self.args.verbose and self.seen: t = tuple(x.t / self.seen * 1E3 for x in self.dt) # speeds per image - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms postprocess per image at shape ' + LOGGER.info(f'Speed: %.1fms preprocess, %.1fms inference, %.1fms postprocess per image at shape ' f'{(1, 3, *self.imgsz)}' % t) - if self.args.save_txt or self.args.save: + if self.args.save or self.args.save_txt or self.args.save_crop: nl = len(list(self.save_dir.glob('labels/*.txt'))) # number of labels s = f"\n{nl} label{'s' * (nl > 1)} saved to {self.save_dir / 'labels'}" if self.args.save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}{s}") diff --git a/ultralytics/yolo/engine/results.py b/ultralytics/yolo/engine/results.py index a4dd5995af..702220dd1a 100644 --- a/ultralytics/yolo/engine/results.py +++ b/ultralytics/yolo/engine/results.py @@ -4,7 +4,6 @@ from functools import lru_cache import numpy as np import torch import torchvision.transforms.functional as F -from PIL import Image from ultralytics.yolo.utils import LOGGER, ops from ultralytics.yolo.utils.plotting import Annotator, colors @@ -136,7 +135,7 @@ class Results: img = deepcopy(self.orig_img) annotator = Annotator(img, line_width, font_size, font, pil, example) boxes = self.boxes - masks = self.masks.data + masks = self.masks logits = self.probs names = self.names if boxes is not None: diff --git a/ultralytics/yolo/engine/trainer.py b/ultralytics/yolo/engine/trainer.py index 8b1ac450de..4fc09addd2 100644 --- a/ultralytics/yolo/engine/trainer.py +++ b/ultralytics/yolo/engine/trainer.py @@ -1,8 +1,10 @@ # Ultralytics YOLO 🚀, GPL-3.0 license """ -Simple training loop; Boilerplate that could apply to any arbitrary neural network, -""" +Train a model on a dataset +Usage: + $ yolo mode=train model=yolov8n.pt data=coco128.yaml imgsz=640 epochs=100 batch=16 +""" import os import subprocess import time diff --git a/ultralytics/yolo/engine/validator.py b/ultralytics/yolo/engine/validator.py index ab8044f31f..ae29b04091 100644 --- a/ultralytics/yolo/engine/validator.py +++ b/ultralytics/yolo/engine/validator.py @@ -1,5 +1,23 @@ # Ultralytics YOLO 🚀, GPL-3.0 license - +""" +Check a model's accuracy on a test or val split of a dataset + +Usage: + $ yolo mode=val model=yolov8n.pt data=coco128.yaml imgsz=640 + +Usage - formats: + $ yolo mode=val model=yolov8n.pt # PyTorch + yolov8n.torchscript # TorchScript + yolov8n.onnx # ONNX Runtime or OpenCV DNN with dnn=True + yolov8n_openvino_model # 
OpenVINO + yolov8n.engine # TensorRT + yolov8n.mlmodel # CoreML (macOS-only) + yolov8n_saved_model # TensorFlow SavedModel + yolov8n.pb # TensorFlow GraphDef + yolov8n.tflite # TensorFlow Lite + yolov8n_edgetpu.tflite # TensorFlow Edge TPU + yolov8n_paddle_model # PaddlePaddle +""" import json from collections import defaultdict from pathlib import Path @@ -105,8 +123,7 @@ class BaseValidator: self.device = model.device if not pt and not jit: self.args.batch = 1 # export.py models default to batch-size 1 - self.logger.info( - f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + self.logger.info(f'Forcing batch=1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') if isinstance(self.args.data, str) and self.args.data.endswith('.yaml'): self.data = check_det_dataset(self.args.data) @@ -136,7 +153,7 @@ class BaseValidator: for batch_i, batch in enumerate(bar): self.run_callbacks('on_val_batch_start') self.batch_i = batch_i - # pre-process + # preprocess with dt[0]: batch = self.preprocess(batch) @@ -149,7 +166,7 @@ class BaseValidator: if self.training: self.loss += trainer.criterion(preds, batch)[1] - # pre-process predictions + # postprocess with dt[3]: preds = self.postprocess(preds) @@ -163,13 +180,14 @@ class BaseValidator: self.check_stats(stats) self.print_results() self.speed = tuple(x.t / len(self.dataloader.dataset) * 1E3 for x in dt) # speeds per image + self.finalize_metrics() self.run_callbacks('on_val_end') if self.training: model.float() results = {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix='val')} return {k: round(float(v), 5) for k, v in results.items()} # return results as 5 decimal place floats else: - self.logger.info('Speed: %.1fms pre-process, %.1fms inference, %.1fms loss, %.1fms post-process per image' % + self.logger.info('Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image' % self.speed) if self.args.save_json and self.jdict: with open(str(self.save_dir / 'predictions.json'), 'w') as f: @@ -197,6 +215,9 @@ class BaseValidator: def update_metrics(self, preds, batch): pass + def finalize_metrics(self, *args, **kwargs): + pass + def get_stats(self): return {} diff --git a/ultralytics/yolo/utils/__init__.py b/ultralytics/yolo/utils/__init__.py index 1f88cb5f35..3bbf6c13ce 100644 --- a/ultralytics/yolo/utils/__init__.py +++ b/ultralytics/yolo/utils/__init__.py @@ -97,6 +97,7 @@ HELP_MSG = \ torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 pd.options.display.max_columns = 10 +pd.options.display.width = 120 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' # for deterministic training @@ -287,9 +288,7 @@ def is_pytest_running(): Returns: (bool): True if pytest is running, False otherwise. 
""" - with contextlib.suppress(Exception): - return 'pytest' in sys.modules - return False + return ('PYTEST_CURRENT_TEST' in os.environ) or ('pytest' in sys.modules) or ('pytest' in Path(sys.argv[0]).stem) def is_github_actions_ci() -> bool: @@ -530,8 +529,7 @@ def set_sentry(): if SETTINGS['sync'] and \ RANK in {-1, 0} and \ Path(sys.argv[0]).name == 'yolo' and \ - not is_pytest_running() and \ - not is_github_actions_ci() and \ + not TESTS_RUNNING and \ ((is_pip_package() and not is_git_dir()) or (get_git_origin_url() == 'https://github.com/ultralytics/ultralytics.git' and get_git_branch() == 'main')): @@ -625,4 +623,5 @@ SETTINGS = get_settings() DATASETS_DIR = Path(SETTINGS['datasets_dir']) # global datasets directory ENVIRONMENT = 'Colab' if is_colab() else 'Kaggle' if is_kaggle() else 'Jupyter' if is_jupyter() else \ 'Docker' if is_docker() else platform.system() +TESTS_RUNNING = is_pytest_running() or is_github_actions_ci() set_sentry() diff --git a/ultralytics/yolo/utils/benchmarks.py b/ultralytics/yolo/utils/benchmarks.py new file mode 100644 index 0000000000..bfbee7620f --- /dev/null +++ b/ultralytics/yolo/utils/benchmarks.py @@ -0,0 +1,101 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Benchmark a YOLO model formats for speed and accuracy + +Usage: + from ultralytics.yolo.utils.benchmarks import run_benchmarks + run_benchmarks(model='yolov8n.pt', imgsz=160) + +Format | `format=argument` | Model +--- | --- | --- +PyTorch | - | yolov8n.pt +TorchScript | `torchscript` | yolov8n.torchscript +ONNX | `onnx` | yolov8n.onnx +OpenVINO | `openvino` | yolov8n_openvino_model/ +TensorRT | `engine` | yolov8n.engine +CoreML | `coreml` | yolov8n.mlmodel +TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/ +TensorFlow GraphDef | `pb` | yolov8n.pb +TensorFlow Lite | `tflite` | yolov8n.tflite +TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov8n_web_model/ +PaddlePaddle | `paddle` | yolov8n_paddle_model/ +""" + +import platform +import time +from pathlib import Path + +import pandas as pd +import torch + +from ultralytics import YOLO +from ultralytics.yolo.engine.exporter import export_formats +from ultralytics.yolo.utils import LOGGER, SETTINGS +from ultralytics.yolo.utils.checks import check_yolo +from ultralytics.yolo.utils.files import file_size + + +def run_benchmarks(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', + imgsz=640, + half=False, + device='cpu', + hard_fail=False): + device = torch.device(int(device) if device.isnumeric() else device) + model = YOLO(model) + + y = [] + t0 = time.time() + for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows(): # index, (name, format, suffix, CPU, GPU) + try: + assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported + assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML + + if 'cpu' in device.type: + assert cpu, 'inference not supported on CPU' + if 'cuda' in device.type: + assert gpu, 'inference not supported on GPU' + + # Export + if format == '-': + filename = model.ckpt_path + export = model # PyTorch format + else: + filename = model.export(imgsz=imgsz, format=format, half=half, device=device) # all others + export = YOLO(filename) + assert suffix in str(filename), 'export failed' + + # Validate + if model.task == 'detect': + data, key = 'coco128.yaml', 'metrics/mAP50-95(B)' + elif model.task == 'segment': + data, key = 'coco128-seg.yaml', 'metrics/mAP50-95(M)' + elif model.task == 'classify': + 
data, key = 'imagenet100', 'metrics/accuracy_top5' + results = export.val(data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, verbose=False) + metric, speed = results.results_dict[key], results.speed['inference'] + y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)]) + except Exception as e: + if hard_fail: + assert type(e) is AssertionError, f'Benchmark hard_fail for {name}: {e}' + LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}') + y.append([name, '❌', None, None, None]) # size, metric, speed left as None + + # Print results + LOGGER.info('\n') + check_yolo(device=device) # print system info + c = ['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'] + df = pd.DataFrame(y, columns=c) + LOGGER.info(f'\nBenchmarks complete for {Path(model.ckpt_path).name} on {data} at imgsz={imgsz} ' + f'({time.time() - t0:.2f}s)') + LOGGER.info(str(df)) + + if hard_fail and isinstance(hard_fail, str): + metrics = df[key].array # values to compare to floor + floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv8n + assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: metric < floor {floor}' + + +if __name__ == '__main__': + run_benchmarks() diff --git a/ultralytics/yolo/utils/callbacks/clearml.py b/ultralytics/yolo/utils/callbacks/clearml.py index 2a2e5f9c18..bd2d5e2226 100644 --- a/ultralytics/yolo/utils/callbacks/clearml.py +++ b/ultralytics/yolo/utils/callbacks/clearml.py @@ -1,5 +1,5 @@ # Ultralytics YOLO 🚀, GPL-3.0 license - +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params try: @@ -7,6 +7,7 @@ try: from clearml import Task assert clearml.__version__ # verify package is not directory + assert not TESTS_RUNNING # do not log pytest except (ImportError, AssertionError): clearml = None @@ -19,14 +20,16 @@ def _log_images(imgs_dict, group='', step=0): def on_pretrain_routine_start(trainer): - # TODO: reuse existing task - task = Task.init(project_name=trainer.args.project or 'YOLOv8', - task_name=trainer.args.name, - tags=['YOLOv8'], - output_uri=True, - reuse_last_task_id=False, - auto_connect_frameworks={'pytorch': False}) - task.connect(vars(trainer.args), name='General') + try: + task = Task.init(project_name=trainer.args.project or 'YOLOv8', + task_name=trainer.args.name, + tags=['YOLOv8'], + output_uri=True, + reuse_last_task_id=False, + auto_connect_frameworks={'pytorch': False}) + task.connect(vars(trainer.args), name='General') + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ ClearML not initialized correctly, not logging this run. 
{e}') def on_train_epoch_end(trainer): @@ -35,18 +38,19 @@ def on_train_epoch_end(trainer): def on_fit_epoch_end(trainer): - if trainer.epoch == 0: + task = Task.current_task() + if task and trainer.epoch == 0: model_info = { 'Parameters': get_num_params(trainer.model), 'GFLOPs': round(get_flops(trainer.model), 3), 'Inference speed (ms/img)': round(trainer.validator.speed[1], 3)} - Task.current_task().connect(model_info, name='Model') + task.connect(model_info, name='Model') def on_train_end(trainer): - Task.current_task().update_output_model(model_path=str(trainer.best), - model_name=trainer.args.name, - auto_delete_file=False) + task = Task.current_task() + if task: + task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False) callbacks = { diff --git a/ultralytics/yolo/utils/callbacks/comet.py b/ultralytics/yolo/utils/callbacks/comet.py index 120c028cb7..d33871c0b9 100644 --- a/ultralytics/yolo/utils/callbacks/comet.py +++ b/ultralytics/yolo/utils/callbacks/comet.py @@ -1,41 +1,49 @@ # Ultralytics YOLO 🚀, GPL-3.0 license - +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params try: import comet_ml -except ImportError: + assert not TESTS_RUNNING # do not log pytest + assert comet_ml.__version__ # verify package is not directory +except (ImportError, AssertionError): comet_ml = None def on_pretrain_routine_start(trainer): - experiment = comet_ml.Experiment(project_name=trainer.args.project or 'YOLOv8') - experiment.log_parameters(vars(trainer.args)) + try: + experiment = comet_ml.Experiment(project_name=trainer.args.project or 'YOLOv8') + experiment.log_parameters(vars(trainer.args)) + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ Comet not initialized correctly, not logging this run. 
{e}') def on_train_epoch_end(trainer): experiment = comet_ml.get_global_experiment() - experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1) - if trainer.epoch == 1: - for f in trainer.save_dir.glob('train_batch*.jpg'): - experiment.log_image(f, name=f.stem, step=trainer.epoch + 1) + if experiment: + experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1) + if trainer.epoch == 1: + for f in trainer.save_dir.glob('train_batch*.jpg'): + experiment.log_image(f, name=f.stem, step=trainer.epoch + 1) def on_fit_epoch_end(trainer): experiment = comet_ml.get_global_experiment() - experiment.log_metrics(trainer.metrics, step=trainer.epoch + 1) - if trainer.epoch == 0: - model_info = { - 'model/parameters': get_num_params(trainer.model), - 'model/GFLOPs': round(get_flops(trainer.model), 3), - 'model/speed(ms)': round(trainer.validator.speed[1], 3)} - experiment.log_metrics(model_info, step=trainer.epoch + 1) + if experiment: + experiment.log_metrics(trainer.metrics, step=trainer.epoch + 1) + if trainer.epoch == 0: + model_info = { + 'model/parameters': get_num_params(trainer.model), + 'model/GFLOPs': round(get_flops(trainer.model), 3), + 'model/speed(ms)': round(trainer.validator.speed[1], 3)} + experiment.log_metrics(model_info, step=trainer.epoch + 1) def on_train_end(trainer): experiment = comet_ml.get_global_experiment() - experiment.log_model('YOLOv8', file_or_folder=str(trainer.best), file_name='best.pt', overwrite=True) + if experiment: + experiment.log_model('YOLOv8', file_or_folder=str(trainer.best), file_name='best.pt', overwrite=True) callbacks = { diff --git a/ultralytics/yolo/utils/callbacks/hub.py b/ultralytics/yolo/utils/callbacks/hub.py index 3f1a981fa8..f9943a6d33 100644 --- a/ultralytics/yolo/utils/callbacks/hub.py +++ b/ultralytics/yolo/utils/callbacks/hub.py @@ -4,11 +4,11 @@ import json from time import time from ultralytics.hub.utils import PREFIX, traces -from ultralytics.yolo.utils import LOGGER +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING def on_pretrain_routine_end(trainer): - session = getattr(trainer, 'hub_session', None) + session = not TESTS_RUNNING and getattr(trainer, 'hub_session', None) if session: # Start timer for upload rate limit LOGGER.info(f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀') diff --git a/ultralytics/yolo/utils/checks.py b/ultralytics/yolo/utils/checks.py index fd7359ed7f..aaf6f25959 100644 --- a/ultralytics/yolo/utils/checks.py +++ b/ultralytics/yolo/utils/checks.py @@ -194,8 +194,12 @@ def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=() try: pkg.require(r) except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met - s += f'"{r}" ' - n += 1 + try: # attempt to import (slower but more accurate) + import importlib + importlib.import_module(next(pkg.parse_requirements(r)).name) + except ImportError: + s += f'"{r}" ' + n += 1 if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv8 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") @@ -250,7 +254,7 @@ def check_file(file, suffix='', download=True): return file else: # search files = [] - for d in 'models', 'yolo/data', 'tracker/cfg': # search directories + for d in 'models', 'datasets', 'tracker/cfg': # search directories files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file if not files: raise 
FileNotFoundError(f"'{file}' does not exist") @@ -280,7 +284,7 @@ def check_imshow(warn=False): return False -def check_yolo(verbose=True): +def check_yolo(verbose=True, device=''): from ultralytics.yolo.utils.torch_utils import select_device if is_colab(): @@ -298,7 +302,7 @@ def check_yolo(verbose=True): else: s = '' - select_device(newline=False) + select_device(device=device, newline=False) LOGGER.info(f'Setup complete ✅ {s}') diff --git a/ultralytics/yolo/utils/metrics.py b/ultralytics/yolo/utils/metrics.py index 277c487698..de0deb0e3c 100644 --- a/ultralytics/yolo/utils/metrics.py +++ b/ultralytics/yolo/utils/metrics.py @@ -512,6 +512,7 @@ class DetMetrics: self.plot = plot self.names = names self.box = Metric() + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} def process(self, tp, conf, pred_cls, target_cls): results = ap_per_class(tp, conf, pred_cls, target_cls, plot=self.plot, save_dir=self.save_dir, @@ -554,6 +555,7 @@ class SegmentMetrics: self.names = names self.box = Metric() self.seg = Metric() + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} def process(self, tp_m, tp_b, conf, pred_cls, target_cls): results_mask = ap_per_class(tp_m, @@ -612,6 +614,7 @@ class ClassifyMetrics: def __init__(self) -> None: self.top1 = 0 self.top5 = 0 + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} def process(self, targets, pred): # target classes and predicted classes diff --git a/ultralytics/yolo/utils/plotting.py b/ultralytics/yolo/utils/plotting.py index 4e3af60f8c..ddbc8308f6 100644 --- a/ultralytics/yolo/utils/plotting.py +++ b/ultralytics/yolo/utils/plotting.py @@ -154,7 +154,7 @@ class Annotator: def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop - xyxy = torch.tensor(xyxy).view(-1, 4) + xyxy = torch.Tensor(xyxy).view(-1, 4) b = xyxy2xywh(xyxy) # boxes if square: b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square diff --git a/ultralytics/yolo/utils/tal.py b/ultralytics/yolo/utils/tal.py index 45b4c16abb..cf48a2f6f0 100644 --- a/ultralytics/yolo/utils/tal.py +++ b/ultralytics/yolo/utils/tal.py @@ -223,7 +223,7 @@ def make_anchors(feats, strides, grid_cell_offset=0.5): def dist2bbox(distance, anchor_points, xywh=True, dim=-1): """Transform distance(ltrb) to box(xywh or xyxy).""" - lt, rb = torch.split(distance, 2, dim) + lt, rb = distance.chunk(2, dim) x1y1 = anchor_points - lt x2y2 = anchor_points + rb if xywh: @@ -235,5 +235,5 @@ def dist2bbox(distance, anchor_points, xywh=True, dim=-1): def bbox2dist(anchor_points, bbox, reg_max): """Transform bbox(xyxy) to dist(ltrb).""" - x1y1, x2y2 = torch.split(bbox, 2, -1) + x1y1, x2y2 = bbox.chunk(2, -1) return torch.cat((anchor_points - x1y1, x2y2 - anchor_points), -1).clamp(0, reg_max - 0.01) # dist (lt, rb) diff --git a/ultralytics/yolo/v8/classify/predict.py b/ultralytics/yolo/v8/classify/predict.py index 5e5fbe60a0..596190da2d 100644 --- a/ultralytics/yolo/v8/classify/predict.py +++ b/ultralytics/yolo/v8/classify/predict.py @@ -22,7 +22,7 @@ class ClassificationPredictor(BasePredictor): results = [] for i, pred in enumerate(preds): orig_img = orig_img[i] if isinstance(orig_img, list) else orig_img - results.append(Results(probs=pred.softmax(0), orig_img=orig_img, names=self.model.names)) + results.append(Results(probs=pred, orig_img=orig_img, names=self.model.names)) return results diff --git a/ultralytics/yolo/v8/classify/val.py b/ultralytics/yolo/v8/classify/val.py index 55a2fb82e6..01cf30934c 100644 --- a/ultralytics/yolo/v8/classify/val.py +++ b/ultralytics/yolo/v8/classify/val.py @@ -30,6 +30,9 @@ class ClassificationValidator(BaseValidator): self.pred.append(preds.argsort(1, descending=True)[:, :5]) self.targets.append(batch['cls']) + def finalize_metrics(self, *args, **kwargs): + self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed)) + def get_stats(self): self.metrics.process(self.targets, self.pred) return self.metrics.results_dict diff --git a/ultralytics/yolo/v8/detect/val.py b/ultralytics/yolo/v8/detect/val.py index 43c625c66e..974ed763c7 100644 --- a/ultralytics/yolo/v8/detect/val.py +++ b/ultralytics/yolo/v8/detect/val.py @@ -111,6 +111,9 @@ class DetectionValidator(BaseValidator): # if self.args.save_txt: # save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + def finalize_metrics(self, *args, **kwargs): + self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed)) + def get_stats(self): stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*self.stats)] # to numpy if len(stats) and stats[0].any(): diff --git a/ultralytics/yolo/v8/segment/val.py b/ultralytics/yolo/v8/segment/val.py index d81cfb13aa..65ee4b6d30 100644 --- a/ultralytics/yolo/v8/segment/val.py +++ b/ultralytics/yolo/v8/segment/val.py @@ -1,6 +1,5 @@ # Ultralytics YOLO 🚀, GPL-3.0 license -import os from multiprocessing.pool import ThreadPool from pathlib import Path @@ -10,7 +9,7 @@ import torch.nn.functional as F from ultralytics.yolo.utils import DEFAULT_CFG, NUM_THREADS, ops from ultralytics.yolo.utils.checks import check_requirements -from ultralytics.yolo.utils.metrics import ConfusionMatrix, SegmentMetrics, box_iou, mask_iou +from ultralytics.yolo.utils.metrics import SegmentMetrics, box_iou, mask_iou 
from ultralytics.yolo.utils.plotting import output_to_target, plot_images from ultralytics.yolo.v8.detect import DetectionValidator @@ -120,6 +119,9 @@ class SegmentationValidator(DetectionValidator): # if self.args.save_txt: # save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + def finalize_metrics(self, *args, **kwargs): + self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed)) + def _process_batch(self, detections, labels, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix
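
The `finalize_metrics` hooks added above (identical in the classify, detect, and segment validators) copy the validator's measured speeds into the metrics object, whose new `speed` dict is initialized in the metrics.py hunk. That round trip is what lets `run_benchmarks` read timings straight off the validation results. A rough sketch of the flow, assuming a local `coco128.yaml` dataset is available:

from ultralytics import YOLO

model = YOLO('yolov8n.pt')
results = model.val(data='coco128.yaml', imgsz=160, batch=1, plots=False)

# BaseValidator fills self.speed, then finalize_metrics() runs:
#   self.metrics.speed = dict(zip(self.metrics.speed.keys(), self.speed))
print(results.speed['inference'])                    # ms per image, as read by run_benchmarks()
print(results.results_dict['metrics/mAP50-95(B)'])   # detect-task key used in benchmarks.py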