diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 0631213715..5f02211de5 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -52,9 +52,9 @@ body: - type: textarea attributes: label: Environment - description: Many issues are often related to dependency versions and hardware. Please provide the output of `yolo checks` or `ultralytics.checks()` command to help us diagnose the problem. + description: Many issues are often related to dependency versions and hardware. Please provide the output of `yolo checks` (CLI) or `ultralytics.utils.checks.collect_system_info()` (Python) command to help us diagnose the problem. placeholder: | - Paste output of `yolo checks` or `ultralytics.checks()` command, i.e.: + Paste output of `yolo checks` (CLI) or `ultralytics.utils.checks.collect_system_info()` (Python) command, i.e.: ``` Ultralytics 8.3.2 🚀 Python-3.11.2 torch-2.4.1 CPU (Apple M3) Setup complete ✅ (8 CPUs, 16.0 GB RAM, 266.5/460.4 GB disk) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 9b1c5364a6..cd574204f5 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -52,16 +52,15 @@ jobs: - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - cache: "pip" # caching pip dependencies + - uses: astral-sh/setup-uv@v3 - name: Install requirements shell: bash # for Windows compatibility run: | - python -m pip install --upgrade pip wheel - pip install . --extra-index-url https://download.pytorch.org/whl/cpu + uv pip install --system . --extra-index-url https://download.pytorch.org/whl/cpu - name: Check environment run: | yolo checks - pip list + uv pip list - name: Test HUB training shell: python env: @@ -111,6 +110,7 @@ jobs: - name: Install requirements shell: bash # for Windows compatibility run: | + # Warnings: uv causes numpy errors during benchmarking python -m pip install --upgrade pip wheel pip install -e ".[export]" "coverage[toml]" --extra-index-url https://download.pytorch.org/whl/cpu - name: Check environment @@ -143,7 +143,7 @@ jobs: coverage xml -o coverage-benchmarks.xml - name: Upload Coverage Reports to CodeCov if: github.repository == 'ultralytics/ultralytics' - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: flags: Benchmarks env: @@ -172,12 +172,11 @@ jobs: - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - cache: "pip" # caching pip dependencies + - uses: astral-sh/setup-uv@v3 - name: Install requirements shell: bash # for Windows compatibility run: | # CoreML must be installed before export due to protobuf error from AutoInstall - python -m pip install --upgrade pip wheel slow="" torch="" if [ "${{ matrix.torch }}" == "1.8.0" ]; then @@ -186,11 +185,11 @@ jobs: if [[ "${{ github.event_name }}" =~ ^(schedule|workflow_dispatch)$ ]]; then slow="pycocotools mlflow" fi - pip install -e ".[export]" $torch $slow pytest-cov --extra-index-url https://download.pytorch.org/whl/cpu + uv pip install --system -e ".[export]" $torch $slow pytest-cov --extra-index-url https://download.pytorch.org/whl/cpu - name: Check environment run: | yolo checks - pip list + uv pip list - name: Pytest tests shell: bash # for Windows compatibility run: | @@ -201,7 +200,7 @@ jobs: pytest $slow --cov=ultralytics/ --cov-report xml tests/ - name: Upload Coverage Reports to CodeCov if: github.repository == 'ultralytics/ultralytics' # && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' - 
uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: flags: Tests env: @@ -213,12 +212,13 @@ jobs: runs-on: gpu-latest steps: - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v3 - name: Install requirements - run: pip install . pytest-cov + run: uv pip install --system . pytest-cov - name: Check environment run: | yolo checks - pip list + uv pip list - name: Pytest tests run: | slow="" @@ -227,7 +227,7 @@ jobs: fi pytest $slow --cov=ultralytics/ --cov-report xml tests/test_cuda.py - name: Upload Coverage Reports to CodeCov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 with: flags: GPU env: @@ -294,13 +294,8 @@ jobs: channels: conda-forge,defaults channel-priority: true activate-environment: anaconda-client-env - - name: Cleanup toolcache - run: | - echo "Free space before deletion:" - df -h / - rm -rf /opt/hostedtoolcache - echo "Free space after deletion:" - df -h / + - name: Cleanup disk space + uses: ultralytics/actions/cleanup-disk@main - name: Install Linux packages run: | # Fix cv2 ImportError: 'libEGL.so.1: cannot open shared object file: No such file or directory' @@ -348,14 +343,14 @@ jobs: Summary: runs-on: ubuntu-latest - needs: [HUB, Benchmarks, Tests, GPU, RaspberryPi, Conda] # Add job names that you want to check for failure - if: always() # This ensures the job runs even if previous jobs fail + needs: [HUB, Benchmarks, Tests, GPU, RaspberryPi, Conda] + if: always() steps: - name: Check for failure and notify if: (needs.HUB.result == 'failure' || needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.GPU.result == 'failure' || needs.RaspberryPi.result == 'failure' || needs.Conda.result == 'failure' ) && github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push') && github.run_attempt == '1' - uses: slackapi/slack-github-action@v1.27.0 + uses: slackapi/slack-github-action@v2.0.0 with: + webhook-type: incoming-webhook + webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} + text: " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n" diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml index 38f30bb1b6..26846b0b44 100644 --- a/.github/workflows/docker.yaml +++ b/.github/workflows/docker.yaml @@ -134,12 +134,12 @@ jobs: - name: Build Image if: github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true' - uses: nick-invision/retry@v3 + uses: ultralytics/actions/retry@main with: timeout_minutes: 120 - retry_wait_seconds: 60 - max_attempts: 3 # retry twice - command: | + retry_delay_seconds: 60 + retries: 2 + run: | docker build \ --platform ${{ matrix.platforms }} \ -f docker/${{ matrix.dockerfile }} \ @@ -172,7 +172,7 @@ jobs: fi if [[ "${{ matrix.tags }}" == "latest-python" ]]; then t=ultralytics/ultralytics:latest-jupyter - v=ultralytics/ultralytics:${{ steps.get_version.outputs.version_tag }}-jupyter + v=ultralytics/ultralytics:${{ 
steps.get_version.outputs.version }}-jupyter docker build -f docker/Dockerfile-jupyter -t $t -t $v . docker push $t if [[ "${{ steps.check_tag.outputs.new_release }}" == "true" ]]; then @@ -202,9 +202,9 @@ jobs: steps: - name: Check for failure and notify if: needs.docker.result == 'failure' && github.repository == 'ultralytics/ultralytics' && github.event_name == 'push' && github.run_attempt == '1' - uses: slackapi/slack-github-action@v1.27.0 + uses: slackapi/slack-github-action@v2.0.0 with: + webhook-type: incoming-webhook + webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} + text: " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index ea6def886a..0210da5457 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -29,7 +29,7 @@ on: jobs: Docs: if: github.repository == 'ultralytics/ultralytics' - runs-on: macos-14 + runs-on: ubuntu-latest steps: - name: Git config run: | @@ -46,9 +46,9 @@ jobs: uses: actions/setup-python@v5 with: python-version: "3.x" - cache: "pip" # caching pip dependencies + - uses: astral-sh/setup-uv@v3 - name: Install Dependencies - run: pip install ruff black tqdm minify-html mkdocs-material "mkdocstrings[python]" mkdocs-jupyter mkdocs-redirects mkdocs-ultralytics-plugin mkdocs-macros-plugin + run: uv pip install --system ruff black tqdm minify-html mkdocs-material "mkdocstrings[python]" mkdocs-jupyter mkdocs-redirects mkdocs-ultralytics-plugin mkdocs-macros-plugin - name: Ruff fixes continue-on-error: true run: ruff check --fix --unsafe-fixes --select D --ignore=D100,D104,D203,D205,D212,D213,D401,D406,D407,D413 . 
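The workflow hunks above move CI dependency installation from pip to uv (`astral-sh/setup-uv@v3` plus `uv pip install --system`). For contributors who want to reproduce the CI environment locally, a minimal sketch of the same install flow is shown below; it assumes uv is available on PATH, reuses the extras and index URL taken from the hunks above, and relies on `--system` to install into the active interpreter rather than a managed virtual environment.

```bash
# Install uv once (standalone installer); `pip install uv` also works
curl -LsSf https://astral.sh/uv/install.sh | sh

# Mirror the CI "Install requirements" step: editable install with export extras and CPU-only PyTorch wheels
uv pip install --system -e ".[export]" --extra-index-url https://download.pytorch.org/whl/cpu

# Mirror the CI "Check environment" step
yolo checks
uv pip list
```

Note that the Benchmarks job intentionally keeps plain pip, per the inline warning that uv causes numpy errors during benchmarking.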
diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index acd2865658..28720abfba 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -15,7 +15,7 @@ on: jobs: format: - runs-on: macos-14 + runs-on: ubuntu-latest steps: - name: Run Ultralytics Formatting uses: ultralytics/actions@main diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 4dd8aa38b0..b66a7d507b 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -29,12 +29,12 @@ jobs: sudo mv lychee /usr/local/bin - name: Test Markdown and HTML links with retry - uses: nick-invision/retry@v3 + uses: ultralytics/actions/retry@main with: - timeout_minutes: 5 - retry_wait_seconds: 60 - max_attempts: 3 - command: | + timeout_minutes: 60 + retry_delay_seconds: 900 + retries: 2 + run: | lychee \ --scheme https \ --timeout 60 \ @@ -59,12 +59,12 @@ jobs: - name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' - uses: nick-invision/retry@v3 + uses: ultralytics/actions/retry@main with: - timeout_minutes: 5 - retry_wait_seconds: 60 - max_attempts: 3 - command: | + timeout_minutes: 60 + retry_delay_seconds: 900 + retries: 2 + run: | lychee \ --scheme https \ --timeout 60 \ diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index bccc28332b..b1dd1e4350 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -17,7 +17,7 @@ jobs: if: github.repository == 'ultralytics/ultralytics' && github.actor == 'glenn-jocher' name: Publish runs-on: ubuntu-latest - environment: # for GitHub Deployments tab + environment: # for GitHub Deployments tab name: Release - PyPI url: https://pypi.org/p/ultralytics permissions: @@ -90,19 +90,20 @@ jobs: fi echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_ENV echo "PR_TITLE=$PR_TITLE" >> $GITHUB_ENV + - name: Notify on Slack (Success) if: success() && github.event_name == 'push' && steps.check_pypi.outputs.increment == 'True' - uses: slackapi/slack-github-action@v1.27.0 + uses: slackapi/slack-github-action@v2.0.0 with: + webhook-type: incoming-webhook + webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} payload: | - {"text": " GitHub Actions success for ${{ github.workflow }} ✅\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* NEW `${{ github.repository }} ${{ steps.check_pypi.outputs.current_tag }}` pip package published 😃\n*Job Status:* ${{ job.status }}\n*Pull Request:* ${{ env.PR_TITLE }}\n"} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} + text: " GitHub Actions success for ${{ github.workflow }} ✅\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* NEW `${{ github.repository }} ${{ steps.check_pypi.outputs.current_tag }}` pip package published 😃\n*Job Status:* ${{ job.status }}\n*Pull Request:* ${{ env.PR_TITLE }}\n" - name: Notify on Slack (Failure) if: failure() - uses: slackapi/slack-github-action@v1.27.0 + uses: slackapi/slack-github-action@v2.0.0 with: + webhook-type: incoming-webhook + webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository 
}}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n*Pull Request:* ${{ env.PR_TITLE }}\n"} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} + text: " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n*Pull Request:* ${{ env.PR_TITLE }}\n" diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 991e0edd99..cfbe31ae5f 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -8,7 +8,7 @@ on: permissions: pull-requests: write issues: write - + jobs: stale: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 4e0f0845b2..0d4b744d3f 100644 --- a/.gitignore +++ b/.gitignore @@ -163,6 +163,7 @@ weights/ *_openvino_model/ *_paddle_model/ *_ncnn_model/ +*_imx_model/ pnnx* # Autogenerated files for tests diff --git a/docker/Dockerfile b/docker/Dockerfile index 931326f891..a25fbdcce5 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -56,7 +56,6 @@ RUN pip install numpy==1.23.5 # Remove extra build files RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json - # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push diff --git a/docker/Dockerfile-cpu b/docker/Dockerfile-cpu index fe8d88521f..ee7dfff1c0 100644 --- a/docker/Dockerfile-cpu +++ b/docker/Dockerfile-cpu @@ -2,8 +2,8 @@ # Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLO11 deployments -# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:23.10 +# Use official Python base image for reproducibility (3.11.10 for export and 3.12.6 for inference) +FROM python:3.11.10-slim-bookworm # Set environment variables ENV PYTHONUNBUFFERED=1 \ @@ -39,14 +39,14 @@ RUN pip install -e ".[export]" --extra-index-url https://download.pytorch.org/wh RUN yolo export model=tmp/yolo11n.pt format=edgetpu imgsz=32 RUN yolo export model=tmp/yolo11n.pt format=ncnn imgsz=32 # Requires Python<=3.10, bug with paddlepaddle==2.5.0 https://github.com/PaddlePaddle/X2Paddle/issues/991 -# RUN pip install "paddlepaddle>=2.6.0" x2paddle - -# Creates a symbolic link to make 'python' point to 'python3' -RUN ln -sf /usr/bin/python3 /usr/bin/python +RUN pip install "paddlepaddle>=2.6.0" x2paddle # Remove extra build files RUN rm -rf tmp /root/.config/Ultralytics/persistent_cache.json +# Set default command to bash +CMD ["/bin/bash"] + # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push diff --git a/docker/Dockerfile-runner b/docker/Dockerfile-runner index 539f0aa03e..514ca53f42 100644 --- a/docker/Dockerfile-runner +++ b/docker/Dockerfile-runner @@ -35,7 +35,6 @@ ENTRYPOINT sh -c './config.sh --url https://github.com/ultralytics/ultralytics \ --replace && \ ./run.sh' - # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push diff --git a/docs/en/datasets/index.md b/docs/en/datasets/index.md index 5f165b2477..38f219242c 100644 --- a/docs/en/datasets/index.md +++ b/docs/en/datasets/index.md 
@@ -74,6 +74,7 @@ Pose estimation is a technique used to determine the pose of the object relative - [COCO8-pose](pose/coco8-pose.md): A smaller dataset for pose estimation tasks, containing a subset of 8 COCO images with human pose annotations. - [Tiger-pose](pose/tiger-pose.md): A compact dataset consisting of 263 images focused on tigers, annotated with 12 keypoints per tiger for pose estimation tasks. - [Hand-Keypoints](pose/hand-keypoints.md): A concise dataset featuring over 26,000 images centered on human hands, annotated with 21 keypoints per hand, designed for pose estimation tasks. +- [Dog-pose](pose/dog-pose.md): A comprehensive dataset featuring approximately 6,000 images focused on dogs, annotated with 24 keypoints per dog, tailored for pose estimation tasks. ## [Classification](classify/index.md) diff --git a/docs/en/datasets/pose/dog-pose.md b/docs/en/datasets/pose/dog-pose.md new file mode 100644 index 0000000000..fa6acb0755 --- /dev/null +++ b/docs/en/datasets/pose/dog-pose.md @@ -0,0 +1,141 @@ +--- +comments: true +description: Discover the Dog-Pose dataset for pose detection. Featuring 6,773 training and 1,703 test images, it's a robust dataset for training YOLO11 models. +keywords: Dog-Pose, Ultralytics, pose detection dataset, YOLO11, machine learning, computer vision, training data +--- + +# Dog-Pose Dataset + +## Introduction + +The [Ultralytics](https://www.ultralytics.com/) Dog-pose dataset is a high-quality and extensive dataset specifically curated for dog keypoint estimation. With 6,773 training images and 1,703 test images, this dataset provides a solid foundation for training robust pose estimation models. Each annotated image includes 24 keypoints with 3 dimensions per keypoint (x, y, visibility), making it a valuable resource for advanced research and development in computer vision. + +Ultralytics Dog-pose display image + +This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com/) and [YOLO11](https://github.com/ultralytics/ultralytics). + +## Dataset YAML + +A YAML (Yet Another Markup Language) file is used to define the dataset configuration. It includes paths, keypoint details, and other relevant information. In the case of the Dog-pose dataset, The `dog-pose.yaml` is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dog-pose.yaml](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dog-pose.yaml). + +!!! example "ultralytics/cfg/datasets/dog-pose.yaml" + + ```yaml + --8<-- "ultralytics/cfg/datasets/dog-pose.yaml" + ``` + +## Usage + +To train a YOLO11n-pose model on the Dog-pose dataset for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 640, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page. + +!!! 
example "Train Example" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO("yolo11n-pose.pt") # load a pretrained model (recommended for training) + + # Train the model + results = model.train(data="dog-pose.yaml", epochs=100, imgsz=640) + ``` + + === "CLI" + + ```bash + # Start training from a pretrained *.pt model + yolo pose train data=dog-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 + ``` + +## Sample Images and Annotations + +Here are some examples of images from the Dog-pose dataset, along with their corresponding annotations: + +Dataset sample image + +- **Mosaiced Image**: This image demonstrates a training batch composed of mosaiced dataset images. Mosaicing is a technique used during training that combines multiple images into a single image to increase the variety of objects and scenes within each training batch. This helps improve the model's ability to generalize to different object sizes, aspect ratios, and contexts. + +The example showcases the variety and complexity of the images in the Dog-pose dataset and the benefits of using mosaicing during the training process. + +## Citations and Acknowledgments + +If you use the Dog-pose dataset in your research or development work, please cite the following paper: + +!!! quote "" + + === "BibTeX" + + ```bibtex + @inproceedings{khosla2011fgvc, + title={Novel dataset for Fine-Grained Image Categorization}, + author={Aditya Khosla and Nityananda Jayadevaprakash and Bangpeng Yao and Li Fei-Fei}, + booktitle={First Workshop on Fine-Grained Visual Categorization (FGVC), IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2011} + } + @inproceedings{deng2009imagenet, + title={ImageNet: A Large-Scale Hierarchical Image Database}, + author={Jia Deng and Wei Dong and Richard Socher and Li-Jia Li and Kai Li and Li Fei-Fei}, + booktitle={IEEE Computer Vision and Pattern Recognition (CVPR)}, + year={2009} + } + ``` + +We would like to acknowledge the Stanford team for creating and maintaining this valuable resource for the [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) community. For more information about the Dog-pose dataset and its creators, visit the [Stanford Dogs Dataset website](http://vision.stanford.edu/aditya86/ImageNetDogs/). + +## FAQ + +### What is the Dog-pose dataset, and how is it used with Ultralytics YOLO11? + +The Dog-pose dataset features 6,773 training and 1,703 test images annotated with 24 keypoints for dog pose estimation. Ideal for training and validating models with [Ultralytics YOLO11](https://docs.ultralytics.com/models/yolo11/), it supports applications like animal behavior analysis and veterinary studies. + +### How do I train a YOLO11 model using the Dog-pose dataset in Ultralytics? + +To train a YOLO11n-pose model on the Dog-pose dataset for 100 epochs with an image size of 640, follow these examples: + +!!! example "Train Example" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO("yolo11n-pose.pt") + + # Train the model + results = model.train(data="dog-pose.yaml", epochs=100, imgsz=640) + ``` + + === "CLI" + + ```bash + yolo pose train data=dog-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640 + ``` + +For a comprehensive list of training arguments, refer to the model [Training](../../modes/train.md) page. + +### What are the benefits of using the Dog-pose dataset?
+ +The Dog-pose dataset offers several benefits: + +**Large and Diverse Dataset**: With 6,000 images, it provides a substantial amount of data covering a wide range of dog poses, breeds, and contexts, enabling robust model training and evaluation. + +**Pose-specific Annotations**: Offers detailed annotations for pose estimation, ensuring high-quality data for training pose detection models. + +**Real-World Scenarios**: Includes images from varied environments, enhancing the model's ability to generalize to real-world applications. + +**Model Performance Improvement**: The diversity and scale of the dataset help improve model accuracy and robustness, particularly for tasks involving fine-grained pose estimation. + +For more about its features and usage, see the [Dataset Introduction](#introduction) section. + +### How does mosaicing benefit the YOLO11 training process using the Dog-pose dataset? + +Mosaicing, as illustrated in the sample images from the Dog-pose dataset, merges multiple images into a single composite, enriching the diversity of objects and scenes in each training batch. This approach enhances the model's capacity to generalize across different object sizes, aspect ratios, and contexts, leading to improved performance. For example images, refer to the [Sample Images and Annotations](#sample-images-and-annotations) section. + +### Where can I find the Dog-pose dataset YAML file and how do I use it? + +The Dog-pose dataset YAML file can be found [here](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/datasets/dog-pose.yaml). This file defines the dataset configuration, including paths, classes, and other relevant information. Use this file with the YOLO11 training scripts as mentioned in the [Train Example](#how-do-i-train-a-yolo11-model-using-the-dog-pose-dataset-in-ultralytics) section. + +For more FAQs and detailed documentation, visit the [Ultralytics Documentation](https://docs.ultralytics.com/). diff --git a/docs/en/datasets/pose/index.md b/docs/en/datasets/pose/index.md index 296b74f831..321bb9c128 100644 --- a/docs/en/datasets/pose/index.md +++ b/docs/en/datasets/pose/index.md @@ -127,6 +127,15 @@ This section outlines the datasets that are compatible with Ultralytics YOLO for - **Usage**: Great for human hand pose estimation. - [Read more about Hand Keypoints](hand-keypoints.md) +### Dog-Pose + +- **Description**: The Dog Pose dataset contains approximately 6,000 images, providing a diverse and extensive resource for training and validation of dog pose estimation models. +- **Label Format**: Follows the Ultralytics YOLO format, with annotations for multiple keypoints specific to dog anatomy. +- **Number of Classes**: 1 (Dog). +- **Keypoints**: Includes 24 keypoints tailored to dog poses, such as limbs, joints, and head positions. +- **Usage**: Ideal for training models to estimate dog poses in various scenarios, from research to real-world applications. +- [Read more about Dog-Pose](dog-pose.md) + ### Adding your own dataset If you have your own dataset and would like to use it for training pose estimation models with Ultralytics YOLO format, ensure that it follows the format specified above under "Ultralytics YOLO format". Convert your annotations to the required format and specify the paths, number of classes, and class names in the YAML configuration file. 
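The "Adding your own dataset" note above asks for annotations in the Ultralytics YOLO pose format plus a YAML configuration listing paths, class names, and keypoint details. As an illustration only (file names and directory layout are placeholders, and the keys follow the 24-keypoint, 3-value convention described for the bundled `dog-pose.yaml`), a custom dog-pose setup might look like this:

```bash
# Illustrative only: a minimal custom pose dataset config in the Ultralytics YOLO format
cat > my-dog-pose.yaml <<'EOF'
path: datasets/my-dog-pose   # dataset root directory
train: images/train          # training images (relative to 'path')
val: images/val              # validation images (relative to 'path')
kpt_shape: [24, 3]           # 24 keypoints, each stored as (x, y, visibility)
names:
  0: dog
EOF

# Each label file contains one line per object, with all values normalized to 0-1:
#   <class> <x_center> <y_center> <width> <height> <kpt1_x> <kpt1_y> <kpt1_vis> ... <kpt24_x> <kpt24_y> <kpt24_vis>

# Train as in the Dog-pose examples above, pointing at the custom YAML
yolo pose train data=my-dog-pose.yaml model=yolo11n-pose.pt epochs=100 imgsz=640
```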
diff --git a/docs/en/guides/analytics.md b/docs/en/guides/analytics.md index dec9b4cce8..cd7fc40dcf 100644 --- a/docs/en/guides/analytics.md +++ b/docs/en/guides/analytics.md @@ -45,126 +45,15 @@ This guide provides a comprehensive overview of three fundamental types of [data # generate the pie chart yolo solutions analytics analytics_type="pie" show=True - ``` - - === "Python" - - ```python - import cv2 - - from ultralytics import solutions - - cap = cv2.VideoCapture("Path/to/video/file.mp4") - assert cap.isOpened(), "Error reading video file" - - w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) - - out = cv2.VideoWriter( - "ultralytics_analytics.avi", - cv2.VideoWriter_fourcc(*"MJPG"), - fps, - (1920, 1080), # This is fixed - ) - analytics = solutions.Analytics( - analytics_type="line", - show=True, - ) + # generate the bar plots + yolo solutions analytics analytics_type="bar" show=True - frame_count = 0 - while cap.isOpened(): - success, im0 = cap.read() - if success: - frame_count += 1 - im0 = analytics.process_data(im0, frame_count) # update analytics graph every frame - out.write(im0) # write the video file - else: - break - - cap.release() - out.release() - cv2.destroyAllWindows() + # generate the area plots + yolo solutions analytics analytics_type="area" show=True ``` - === "Pie Chart" - - ```python - import cv2 - - from ultralytics import solutions - - cap = cv2.VideoCapture("Path/to/video/file.mp4") - assert cap.isOpened(), "Error reading video file" - - w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) - - out = cv2.VideoWriter( - "ultralytics_analytics.avi", - cv2.VideoWriter_fourcc(*"MJPG"), - fps, - (1920, 1080), # This is fixed - ) - - analytics = solutions.Analytics( - analytics_type="pie", - show=True, - ) - - frame_count = 0 - while cap.isOpened(): - success, im0 = cap.read() - if success: - frame_count += 1 - im0 = analytics.process_data(im0, frame_count) # update analytics graph every frame - out.write(im0) # write the video file - else: - break - - cap.release() - out.release() - cv2.destroyAllWindows() - ``` - - === "Bar Plot" - - ```python - import cv2 - - from ultralytics import solutions - - cap = cv2.VideoCapture("Path/to/video/file.mp4") - assert cap.isOpened(), "Error reading video file" - - w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) - - out = cv2.VideoWriter( - "ultralytics_analytics.avi", - cv2.VideoWriter_fourcc(*"MJPG"), - fps, - (1920, 1080), # This is fixed - ) - - analytics = solutions.Analytics( - analytics_type="bar", - show=True, - ) - - frame_count = 0 - while cap.isOpened(): - success, im0 = cap.read() - if success: - frame_count += 1 - im0 = analytics.process_data(im0, frame_count) # update analytics graph every frame - out.write(im0) # write the video file - else: - break - - cap.release() - out.release() - cv2.destroyAllWindows() - ``` - - === "Area chart" + === "Python" ```python import cv2 @@ -173,9 +62,9 @@ This guide provides a comprehensive overview of three fundamental types of [data cap = cv2.VideoCapture("Path/to/video/file.mp4") assert cap.isOpened(), "Error reading video file" - w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) + # Video writer out = cv2.VideoWriter( "ultralytics_analytics.avi", cv2.VideoWriter_fourcc(*"MJPG"), @@ -183,11 +72,15 @@ This guide provides a comprehensive 
overview of three fundamental types of [data (1920, 1080), # This is fixed ) + # Init analytics analytics = solutions.Analytics( - analytics_type="area", - show=True, + show=True, # Display the output + analytics_type="line", # Pass the analytics type, could be "pie", "bar" or "area". + model="yolo11n.pt", # Path to the YOLO11 model file + # classes=[0, 2], # If you want to count specific classes i.e person and car with COCO pretrained model. ) + # Process video frame_count = 0 while cap.isOpened(): success, im0 = cap.read() diff --git a/docs/en/guides/distance-calculation.md b/docs/en/guides/distance-calculation.md index 009899ae3c..c9775124d4 100644 --- a/docs/en/guides/distance-calculation.md +++ b/docs/en/guides/distance-calculation.md @@ -55,6 +55,7 @@ Measuring the gap between two objects is known as distance calculation within a # Init distance-calculation obj distance = solutions.DistanceCalculation(model="yolo11n.pt", show=True) + # Process video while cap.isOpened(): success, im0 = cap.read() if not success: diff --git a/docs/en/guides/heatmaps.md b/docs/en/guides/heatmaps.md index 66c26eaa01..038929ccfd 100644 --- a/docs/en/guides/heatmaps.md +++ b/docs/en/guides/heatmaps.md @@ -47,119 +47,12 @@ A heatmap generated with [Ultralytics YOLO11](https://github.com/ultralytics/ult # Pass a custom colormap yolo solutions heatmap colormap=cv2.COLORMAP_INFERNO - ``` - - === "Python" - - ```python - import cv2 - - from ultralytics import solutions - - cap = cv2.VideoCapture("Path/to/video/file.mp4") - assert cap.isOpened(), "Error reading video file" - w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) - - # Video writer - video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)) - - # Init heatmap - heatmap = solutions.Heatmap( - show=True, - model="yolo11n.pt", - colormap=cv2.COLORMAP_PARULA, - ) - - while cap.isOpened(): - success, im0 = cap.read() - if not success: - print("Video frame is empty or video processing has been successfully completed.") - break - im0 = heatmap.generate_heatmap(im0) - video_writer.write(im0) - - cap.release() - video_writer.release() - cv2.destroyAllWindows() - ``` - - === "Line Counting" - - ```python - import cv2 - - from ultralytics import solutions - - cap = cv2.VideoCapture("Path/to/video/file.mp4") - assert cap.isOpened(), "Error reading video file" - w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) - - # Video writer - video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)) - # line for object counting - line_points = [(20, 400), (1080, 404)] - - # Init heatmap - heatmap = solutions.Heatmap( - show=True, - model="yolo11n.pt", - colormap=cv2.COLORMAP_PARULA, - region=line_points, - ) - - while cap.isOpened(): - success, im0 = cap.read() - if not success: - print("Video frame is empty or video processing has been successfully completed.") - break - im0 = heatmap.generate_heatmap(im0) - video_writer.write(im0) - - cap.release() - video_writer.release() - cv2.destroyAllWindows() + # Heatmaps + object counting + yolo solutions heatmap region=[(20, 400), (1080, 404), (1080, 360), (20, 360)] ``` - === "Polygon Counting" - - ```python - import cv2 - - from ultralytics import solutions - - cap = cv2.VideoCapture("Path/to/video/file.mp4") - assert cap.isOpened(), "Error reading video file" - w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, 
cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) - - # Video writer - video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)) - - # Define polygon points - region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360), (20, 400)] - - # Init heatmap - heatmap = solutions.Heatmap( - show=True, - model="yolo11n.pt", - colormap=cv2.COLORMAP_PARULA, - region=region_points, - ) - - while cap.isOpened(): - success, im0 = cap.read() - if not success: - print("Video frame is empty or video processing has been successfully completed.") - break - im0 = heatmap.generate_heatmap(im0) - video_writer.write(im0) - - cap.release() - video_writer.release() - cv2.destroyAllWindows() - ``` - - === "Region Counting" + === "Python" ```python import cv2 @@ -173,51 +66,24 @@ A heatmap generated with [Ultralytics YOLO11](https://github.com/ultralytics/ult # Video writer video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)) - # Define region points - region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)] - - # Init heatmap - heatmap = solutions.Heatmap( - show=True, - model="yolo11n.pt", - colormap=cv2.COLORMAP_PARULA, - region=region_points, - ) - - while cap.isOpened(): - success, im0 = cap.read() - if not success: - print("Video frame is empty or video processing has been successfully completed.") - break - im0 = heatmap.generate_heatmap(im0) - video_writer.write(im0) - - cap.release() - video_writer.release() - cv2.destroyAllWindows() - ``` - - === "Specific Classes" - - ```python - import cv2 - - from ultralytics import solutions - - cap = cv2.VideoCapture("Path/to/video/file.mp4") - assert cap.isOpened(), "Error reading video file" - w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS)) - - # Video writer - video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)) + # In case you want to apply object counting + heatmaps, you can pass region points. + # region_points = [(20, 400), (1080, 404)] # Define line points + # region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)] # Define region points + # region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360), (20, 400)] # Define polygon points # Init heatmap heatmap = solutions.Heatmap( - show=True, - model="yolo11n.pt", - classes=[0, 2], + show=True, # Display the output + model="yolo11n.pt", # Path to the YOLO11 model file + colormap=cv2.COLORMAP_PARULA, # Colormap of heatmap + # region=region_points, # If you want to do object counting with heatmaps, you can pass region_points + # classes=[0, 2], # If you want to generate heatmap for specific classes i.e person and car. + # show_in=True, # Display in counts + # show_out=True, # Display out counts + # line_width=2, # Adjust the line width for bounding boxes and text display ) + # Process video while cap.isOpened(): success, im0 = cap.read() if not success: diff --git a/docs/en/guides/object-counting.md b/docs/en/guides/object-counting.md index 144555793d..ba21ffac2b 100644 --- a/docs/en/guides/object-counting.md +++ b/docs/en/guides/object-counting.md @@ -19,7 +19,7 @@ Object counting with [Ultralytics YOLO11](https://github.com/ultralytics/ultraly allowfullscreen>
- Watch: Object Counting using Ultralytics YOLOv8 + Watch: Object Counting using Ultralytics YOLO11