From 63e7db1dac6e24502315aeb7df9e30e8cbf78a70 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Jan 2024 19:33:12 +0100 Subject: [PATCH] YAML reformat (#7669) Signed-off-by: Glenn Jocher --- .github/workflows/ci.yaml | 41 ++-- .github/workflows/cla.yml | 8 +- .github/workflows/codeql.yaml | 4 +- .github/workflows/docker.yaml | 8 +- .github/workflows/format.yml | 2 +- .github/workflows/links.yml | 2 +- .github/workflows/publish.yml | 4 +- .github/workflows/stale.yml | 6 +- .pre-commit-config.yaml | 6 +- ultralytics/cfg/datasets/Argoverse.yaml | 10 +- ultralytics/cfg/datasets/DOTAv1.5.yaml | 8 +- ultralytics/cfg/datasets/DOTAv1.yaml | 8 +- ultralytics/cfg/datasets/GlobalWheat2020.yaml | 4 +- ultralytics/cfg/datasets/ImageNet.yaml | 10 +- ultralytics/cfg/datasets/Objects365.yaml | 8 +- ultralytics/cfg/datasets/SKU-110K.yaml | 10 +- ultralytics/cfg/datasets/VOC.yaml | 2 - ultralytics/cfg/datasets/VisDrone.yaml | 10 +- ultralytics/cfg/datasets/coco-pose.yaml | 11 +- ultralytics/cfg/datasets/coco.yaml | 10 +- ultralytics/cfg/datasets/coco128-seg.yaml | 10 +- ultralytics/cfg/datasets/coco128.yaml | 10 +- ultralytics/cfg/datasets/coco8-pose.yaml | 11 +- ultralytics/cfg/datasets/coco8-seg.yaml | 10 +- ultralytics/cfg/datasets/coco8.yaml | 10 +- ultralytics/cfg/datasets/dota8.yaml | 6 +- ultralytics/cfg/datasets/open-images-v7.yaml | 10 +- ultralytics/cfg/datasets/tiger-pose.yaml | 9 +- ultralytics/cfg/datasets/xView.yaml | 8 +- ultralytics/cfg/default.yaml | 206 +++++++++--------- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml | 54 ++--- .../cfg/models/rt-detr/rtdetr-resnet101.yaml | 46 ++-- .../cfg/models/rt-detr/rtdetr-resnet50.yaml | 46 ++-- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml | 54 ++--- ultralytics/cfg/models/v3/yolov3-spp.yaml | 36 +-- ultralytics/cfg/models/v3/yolov3-tiny.yaml | 32 +-- ultralytics/cfg/models/v3/yolov3.yaml | 36 +-- ultralytics/cfg/models/v5/yolov5-p6.yaml | 48 ++-- ultralytics/cfg/models/v5/yolov5.yaml | 37 ++-- ultralytics/cfg/models/v6/yolov6.yaml | 34 +-- ultralytics/cfg/models/v8/yolov8-cls.yaml | 14 +- .../cfg/models/v8/yolov8-ghost-p2.yaml | 52 ++--- .../cfg/models/v8/yolov8-ghost-p6.yaml | 54 ++--- ultralytics/cfg/models/v8/yolov8-ghost.yaml | 46 ++-- ultralytics/cfg/models/v8/yolov8-obb.yaml | 46 ++-- ultralytics/cfg/models/v8/yolov8-p2.yaml | 46 ++-- ultralytics/cfg/models/v8/yolov8-p6.yaml | 48 ++-- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml | 50 ++--- ultralytics/cfg/models/v8/yolov8-pose.yaml | 38 ++-- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml | 46 ++-- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml | 48 ++-- ultralytics/cfg/models/v8/yolov8-seg.yaml | 36 +-- ultralytics/cfg/models/v8/yolov8.yaml | 46 ++-- ultralytics/cfg/trackers/botsort.yaml | 14 +- ultralytics/cfg/trackers/bytetrack.yaml | 12 +- 55 files changed, 725 insertions(+), 756 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index e183b3e18..6ebea07cf 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -9,27 +9,27 @@ on: pull_request: branches: [main] schedule: - - cron: '0 0 * * *' # runs at 00:00 UTC every day + - cron: "0 0 * * *" # runs at 00:00 UTC every day workflow_dispatch: inputs: hub: - description: 'Run HUB' + description: "Run HUB" default: false type: boolean benchmarks: - description: 'Run Benchmarks' + description: "Run Benchmarks" default: false type: boolean tests: - description: 'Run Tests' + description: "Run Tests" default: false type: boolean gpu: - description: 'Run GPU' + description: "Run GPU" default: false 
type: boolean conda: - description: 'Run Conda' + description: "Run Conda" default: false type: boolean @@ -41,15 +41,15 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: ['3.11'] + python-version: ["3.11"] steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - cache: 'pip' # caching pip dependencies + cache: "pip" # caching pip dependencies - name: Install requirements - shell: bash # for Windows compatibility + shell: bash # for Windows compatibility run: | python -m pip install --upgrade pip wheel pip install -e . --extra-index-url https://download.pytorch.org/whl/cpu @@ -95,16 +95,16 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: ['3.11'] + python-version: ["3.11"] model: [yolov8n] steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - cache: 'pip' # caching pip dependencies + cache: "pip" # caching pip dependencies - name: Install requirements - shell: bash # for Windows compatibility + shell: bash # for Windows compatibility run: | python -m pip install --upgrade pip wheel pip install -e ".[export]" "coverage[toml]" --extra-index-url https://download.pytorch.org/whl/cpu @@ -150,21 +150,22 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: ['3.11'] + python-version: ["3.11"] torch: [latest] include: - os: ubuntu-latest - python-version: '3.8' # torch 1.8.0 requires python >=3.6, <=3.8 - torch: '1.8.0' # min torch version CI https://pypi.org/project/torchvision/ + python-version: "3.8" # torch 1.8.0 requires python >=3.6, <=3.8 + torch: "1.8.0" # min torch version CI https://pypi.org/project/torchvision/ steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - cache: 'pip' # caching pip dependencies + cache: "pip" # caching pip dependencies - name: Install requirements - shell: bash # for Windows compatibility - run: | # CoreML must be installed before export due to protobuf error from AutoInstall + shell: bash # for Windows compatibility + run: | + # CoreML must be installed before export due to protobuf error from AutoInstall python -m pip install --upgrade pip wheel torch="" if [ "${{ matrix.torch }}" == "1.8.0" ]; then @@ -176,7 +177,7 @@ jobs: yolo checks pip list - name: Pytest tests - shell: bash # for Windows compatibility + shell: bash # for Windows compatibility run: | slow="" if [[ "${{ github.event_name }}" == "schedule" ]] || [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then @@ -220,7 +221,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: ['3.11'] + python-version: ["3.11"] defaults: run: shell: bash -el {0} diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 2d9bfe977..a11ee1963 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -24,14 +24,14 @@ jobs: # must be repository secret token PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} with: - path-to-signatures: 'signatures/version1/cla.json' - path-to-document: 'https://docs.ultralytics.com/help/CLA' # CLA document + path-to-signatures: "signatures/version1/cla.json" + path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document # branch should not be protected - branch: 'main' + branch: "main" allowlist: dependabot[bot],github-actions,[pre-commit*,pre-commit*,bot* remote-organization-name: ultralytics remote-repository-name: cla - custom-pr-sign-comment: 'I have 
read the CLA Document and I sign the CLA' + custom-pr-sign-comment: "I have read the CLA Document and I sign the CLA" custom-allsigned-prcomment: All Contributors have signed the CLA. ✅ #custom-notsigned-prcomment: 'pull request comment with Introductory message to ask new contributors to sign' diff --git a/.github/workflows/codeql.yaml b/.github/workflows/codeql.yaml index 5dc86e806..e6e3e85d3 100644 --- a/.github/workflows/codeql.yaml +++ b/.github/workflows/codeql.yaml @@ -4,7 +4,7 @@ name: "CodeQL" on: schedule: - - cron: '0 0 1 * *' + - cron: "0 0 1 * *" workflow_dispatch: jobs: @@ -19,7 +19,7 @@ jobs: strategy: fail-fast: false matrix: - language: ['python'] + language: ["python"] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] steps: diff --git a/.github/workflows/docker.yaml b/.github/workflows/docker.yaml index 53b2f942a..e98f8c015 100644 --- a/.github/workflows/docker.yaml +++ b/.github/workflows/docker.yaml @@ -64,7 +64,7 @@ jobs: - name: Checkout repo uses: actions/checkout@v4 with: - fetch-depth: 0 # copy full .git directory to access full git history in Docker images + fetch-depth: 0 # copy full .git directory to access full git history in Docker images - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -115,12 +115,12 @@ jobs: -t ultralytics/ultralytics:${{ steps.get_version.outputs.version_tag }} . - name: Run Tests - if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners + if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners run: docker run ultralytics/ultralytics:${{ matrix.tags }} /bin/bash -c "pip install pytest && pytest tests" - name: Run Benchmarks # WARNING: Dockerfile (GPU) error on TF.js export 'module 'numpy' has no attribute 'object'. 
- if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners + if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners run: docker run ultralytics/ultralytics:${{ matrix.tags }} yolo benchmark model=yolov8n.pt imgsz=160 verbose=0.318 - name: Push Docker Image with Ultralytics version tag @@ -139,7 +139,7 @@ jobs: fi - name: Notify on failure - if: github.event_name == 'push' && failure() # do not notify on cancelled() as cancelling is performed by hand + if: github.event_name == 'push' && failure() # do not notify on cancelled() as cancelling is performed by hand uses: slackapi/slack-github-action@v1.24.0 with: payload: | diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index d80132f92..e573b6b9d 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -17,7 +17,7 @@ jobs: - name: Run Ultralytics Formatting uses: ultralytics/actions@main with: - token: ${{ secrets.GITHUB_TOKEN }} # automatically generated + token: ${{ secrets.GITHUB_TOKEN }} # automatically generated python: true docstrings: true markdown: true diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index bb7bd9831..7109800c9 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -12,7 +12,7 @@ name: Check Broken links on: workflow_dispatch: schedule: - - cron: '0 0 * * *' # runs at 00:00 UTC every day + - cron: "0 0 * * *" # runs at 00:00 UTC every day jobs: Links: diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 8cf5e41f9..6756a0edf 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -28,8 +28,8 @@ jobs: - name: Set up Python environment uses: actions/setup-python@v5 with: - python-version: '3.11' - cache: 'pip' # caching pip dependencies + python-version: "3.11" + cache: "pip" # caching pip dependencies - name: Install dependencies run: | python -m pip install --upgrade pip wheel build twine diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index cc7fde6ae..dd8503541 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -3,7 +3,7 @@ name: Close stale issues on: schedule: - - cron: '0 0 * * *' # Runs at 00:00 UTC every day + - cron: "0 0 * * *" # Runs at 00:00 UTC every day jobs: stale: @@ -43,5 +43,5 @@ jobs: days-before-issue-close: 10 days-before-pr-stale: 90 days-before-pr-close: 30 - exempt-issue-labels: 'documentation,tutorial,TODO' - operations-per-run: 300 # The maximum number of operations per run, used to control rate limiting. + exempt-issue-labels: "documentation,tutorial,TODO" + operations-per-run: 300 # The maximum number of operations per run, used to control rate limiting. 
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 390468014..03461f148 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,7 +5,7 @@ # Define bot property if installed via https://github.com/marketplace/pre-commit-ci ci: autofix_prs: true - autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' + autoupdate_commit_msg: "[pre-commit.ci] pre-commit suggestions" autoupdate_schedule: monthly submodules: true @@ -55,7 +55,7 @@ repos: rev: v2.2.6 hooks: - id: codespell - exclude: 'docs/de|docs/fr|docs/pt|docs/es|docs/mkdocs_de.yml' + exclude: "docs/de|docs/fr|docs/pt|docs/es|docs/mkdocs_de.yml" args: - --ignore-words-list=crate,nd,ned,strack,dota,ane,segway,fo,gool,winn,commend,bloc,nam,afterall @@ -64,7 +64,7 @@ repos: hooks: - id: pycln args: [--all] - +# # - repo: https://github.com/PyCQA/docformatter # rev: v1.7.5 # hooks: diff --git a/ultralytics/cfg/datasets/Argoverse.yaml b/ultralytics/cfg/datasets/Argoverse.yaml index 6c3690bdf..138b57504 100644 --- a/ultralytics/cfg/datasets/Argoverse.yaml +++ b/ultralytics/cfg/datasets/Argoverse.yaml @@ -7,12 +7,11 @@ # └── datasets # └── Argoverse ← downloads here (31.5 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/Argoverse # dataset root dir -train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images -val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images -test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview +path: ../datasets/Argoverse # dataset root dir +train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images +val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images +test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview # Classes names: @@ -25,7 +24,6 @@ names: 6: traffic_light 7: stop_sign - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | import json diff --git a/ultralytics/cfg/datasets/DOTAv1.5.yaml b/ultralytics/cfg/datasets/DOTAv1.5.yaml index 7e9b4d410..701535fcf 100644 --- a/ultralytics/cfg/datasets/DOTAv1.5.yaml +++ b/ultralytics/cfg/datasets/DOTAv1.5.yaml @@ -8,10 +8,10 @@ # └── dota1.5 ← downloads here (2GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/DOTAv1.5 # dataset root dir -train: images/train # train images (relative to 'path') 1411 images -val: images/val # val images (relative to 'path') 458 images -test: images/test # test images (optional) 937 images +path: ../datasets/DOTAv1.5 # dataset root dir +train: images/train # train images (relative to 'path') 1411 images +val: images/val # val images (relative to 'path') 458 images +test: images/test # test images (optional) 937 images # Classes for DOTA 1.5 names: diff --git a/ultralytics/cfg/datasets/DOTAv1.yaml b/ultralytics/cfg/datasets/DOTAv1.yaml index 7fedfd30a..f6364d345 100644 --- a/ultralytics/cfg/datasets/DOTAv1.yaml +++ b/ultralytics/cfg/datasets/DOTAv1.yaml @@ -8,10 +8,10 @@ # └── dota1 ← downloads here (2GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/DOTAv1 # dataset root dir -train: images/train # train images (relative to 'path') 1411 images -val: images/val # val images (relative to 'path') 458 images -test: images/test # test images (optional) 937 images +path: ../datasets/DOTAv1 # dataset root dir +train: images/train # train images (relative to 'path') 1411 images +val: images/val # val images (relative to 'path') 458 images +test: images/test # test images (optional) 937 images # Classes for DOTA 1.0 names: diff --git a/ultralytics/cfg/datasets/GlobalWheat2020.yaml b/ultralytics/cfg/datasets/GlobalWheat2020.yaml index 712fec358..ae6bfa0da 100644 --- a/ultralytics/cfg/datasets/GlobalWheat2020.yaml +++ b/ultralytics/cfg/datasets/GlobalWheat2020.yaml @@ -7,9 +7,8 @@ # └── datasets # └── GlobalWheat2020 ← downloads here (7.0 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/GlobalWheat2020 # dataset root dir +path: ../datasets/GlobalWheat2020 # dataset root dir train: # train images (relative to 'path') 3422 images - images/arvalis_1 - images/arvalis_2 @@ -30,7 +29,6 @@ test: # test images (optional) 1276 images names: 0: wheat_head - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from ultralytics.utils.downloads import download diff --git a/ultralytics/cfg/datasets/ImageNet.yaml b/ultralytics/cfg/datasets/ImageNet.yaml index fbdb9aa17..0dc344abb 100644 --- a/ultralytics/cfg/datasets/ImageNet.yaml +++ b/ultralytics/cfg/datasets/ImageNet.yaml @@ -8,12 +8,11 @@ # └── datasets # └── imagenet ← downloads here (144 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/imagenet # dataset root dir -train: train # train images (relative to 'path') 1281167 images -val: val # val images (relative to 'path') 50000 images -test: # test images (optional) +path: ../datasets/imagenet # dataset root dir +train: train # train images (relative to 'path') 1281167 images +val: val # val images (relative to 'path') 50000 images +test: # test images (optional) # Classes names: @@ -2021,6 +2020,5 @@ map: n13133613: ear n15075141: toilet_tissue - # Download script/URL (optional) download: yolo/data/scripts/get_imagenet.sh diff --git a/ultralytics/cfg/datasets/Objects365.yaml b/ultralytics/cfg/datasets/Objects365.yaml index 6d38f4d24..9b117206f 100644 --- a/ultralytics/cfg/datasets/Objects365.yaml +++ b/ultralytics/cfg/datasets/Objects365.yaml @@ -7,12 +7,11 @@ # └── datasets # └── Objects365 ← downloads here (712 GB = 367G data + 345G zips) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/Objects365 # dataset root dir -train: images/train # train images (relative to 'path') 1742289 images +path: ../datasets/Objects365 # dataset root dir +train: images/train # train images (relative to 'path') 1742289 images val: images/val # val images (relative to 'path') 80000 images -test: # test images (optional) +test: # test images (optional) # Classes names: @@ -382,7 +381,6 @@ names: 363: Curling 364: Table Tennis - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from tqdm import tqdm diff --git a/ultralytics/cfg/datasets/SKU-110K.yaml b/ultralytics/cfg/datasets/SKU-110K.yaml index 19c1635bc..fff1baa48 100644 --- a/ultralytics/cfg/datasets/SKU-110K.yaml +++ b/ultralytics/cfg/datasets/SKU-110K.yaml @@ -7,18 +7,16 @@ # └── datasets # └── SKU-110K ← downloads here (13.6 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/SKU-110K # dataset root dir -train: train.txt # train images (relative to 'path') 8219 images -val: val.txt # val images (relative to 'path') 588 images -test: test.txt # test images (optional) 2936 images +path: ../datasets/SKU-110K # dataset root dir +train: train.txt # train images (relative to 'path') 8219 images +val: val.txt # val images (relative to 'path') 588 images +test: test.txt # test images (optional) 2936 images # Classes names: 0: object - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | import shutil diff --git a/ultralytics/cfg/datasets/VOC.yaml b/ultralytics/cfg/datasets/VOC.yaml index 6f76c6320..cd6d5ade2 100644 --- a/ultralytics/cfg/datasets/VOC.yaml +++ b/ultralytics/cfg/datasets/VOC.yaml @@ -7,7 +7,6 @@ # └── datasets # └── VOC ← downloads here (2.8 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] path: ../datasets/VOC train: # train images (relative to 'path') 16551 images @@ -43,7 +42,6 @@ names: 18: train 19: tvmonitor - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | import xml.etree.ElementTree as ET diff --git a/ultralytics/cfg/datasets/VisDrone.yaml b/ultralytics/cfg/datasets/VisDrone.yaml index c483d9b42..773f0b08b 100644 --- a/ultralytics/cfg/datasets/VisDrone.yaml +++ b/ultralytics/cfg/datasets/VisDrone.yaml @@ -7,12 +7,11 @@ # └── datasets # └── VisDrone ← downloads here (2.3 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/VisDrone # dataset root dir -train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images -val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images -test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images +path: ../datasets/VisDrone # dataset root dir +train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images +val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images +test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images # Classes names: @@ -27,7 +26,6 @@ names: 8: bus 9: motor - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | import os diff --git a/ultralytics/cfg/datasets/coco-pose.yaml b/ultralytics/cfg/datasets/coco-pose.yaml index 849529ce1..b50b7a5b5 100644 --- a/ultralytics/cfg/datasets/coco-pose.yaml +++ b/ultralytics/cfg/datasets/coco-pose.yaml @@ -7,15 +7,14 @@ # └── datasets # └── coco-pose ← downloads here (20.1 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/coco-pose # dataset root dir -train: train2017.txt # train images (relative to 'path') 118287 images -val: val2017.txt # val images (relative to 'path') 5000 images -test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 +path: ../datasets/coco-pose # dataset root dir +train: train2017.txt # train images (relative to 'path') 118287 images +val: val2017.txt # val images (relative to 'path') 5000 images +test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 # Keypoints -kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] # Classes diff --git a/ultralytics/cfg/datasets/coco.yaml b/ultralytics/cfg/datasets/coco.yaml index 8881ed6da..d0297f760 100644 --- a/ultralytics/cfg/datasets/coco.yaml +++ b/ultralytics/cfg/datasets/coco.yaml @@ -7,12 +7,11 @@ # └── datasets # └── coco ← downloads here (20.1 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/coco # dataset root dir -train: train2017.txt # train images (relative to 'path') 118287 images -val: val2017.txt # val images (relative to 'path') 5000 images -test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 +path: ../datasets/coco # dataset root dir +train: train2017.txt # train images (relative to 'path') 118287 images +val: val2017.txt # val images (relative to 'path') 5000 images +test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 # Classes names: @@ -97,7 +96,6 @@ names: 78: hair drier 79: toothbrush - # Download script/URL (optional) download: | from ultralytics.utils.downloads import download diff --git a/ultralytics/cfg/datasets/coco128-seg.yaml b/ultralytics/cfg/datasets/coco128-seg.yaml index 176d51266..e898a403e 100644 --- a/ultralytics/cfg/datasets/coco128-seg.yaml +++ b/ultralytics/cfg/datasets/coco128-seg.yaml @@ -7,12 +7,11 @@ # └── datasets # └── coco128-seg ← downloads here (7 MB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/coco128-seg # dataset root dir -train: images/train2017 # train images (relative to 'path') 128 images -val: images/train2017 # val images (relative to 'path') 128 images -test: # test images (optional) +path: ../datasets/coco128-seg # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) # Classes names: @@ -97,6 +96,5 @@ names: 78: hair drier 79: toothbrush - # Download script/URL (optional) download: https://ultralytics.com/assets/coco128-seg.zip diff --git a/ultralytics/cfg/datasets/coco128.yaml b/ultralytics/cfg/datasets/coco128.yaml index a66cbe50d..8d47ee0b7 100644 --- a/ultralytics/cfg/datasets/coco128.yaml +++ b/ultralytics/cfg/datasets/coco128.yaml @@ -7,12 +7,11 @@ # └── datasets # └── coco128 ← downloads here (7 MB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/coco128 # dataset root dir -train: images/train2017 # train images (relative to 'path') 128 images -val: images/train2017 # val images (relative to 'path') 128 images -test: # test images (optional) +path: ../datasets/coco128 # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) # Classes names: @@ -97,6 +96,5 @@ names: 78: hair drier 79: toothbrush - # Download script/URL (optional) download: https://ultralytics.com/assets/coco128.zip diff --git a/ultralytics/cfg/datasets/coco8-pose.yaml b/ultralytics/cfg/datasets/coco8-pose.yaml index dcb6424b0..4dee5be73 100644 --- a/ultralytics/cfg/datasets/coco8-pose.yaml +++ b/ultralytics/cfg/datasets/coco8-pose.yaml @@ -7,15 +7,14 @@ # └── datasets # └── coco8-pose ← downloads here (1 MB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/coco8-pose # dataset root dir -train: images/train # train images (relative to 'path') 4 images -val: images/val # val images (relative to 'path') 4 images -test: # test images (optional) +path: ../datasets/coco8-pose # dataset root dir +train: images/train # train images (relative to 'path') 4 images +val: images/val # val images (relative to 'path') 4 images +test: # test images (optional) # Keypoints -kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] # Classes diff --git a/ultralytics/cfg/datasets/coco8-seg.yaml b/ultralytics/cfg/datasets/coco8-seg.yaml index 1f54a233e..d8b6ed295 100644 --- a/ultralytics/cfg/datasets/coco8-seg.yaml +++ b/ultralytics/cfg/datasets/coco8-seg.yaml @@ -7,12 +7,11 @@ # └── datasets # └── coco8-seg ← downloads here (1 MB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/coco8-seg # dataset root dir -train: images/train # train images (relative to 'path') 4 images -val: images/val # val images (relative to 'path') 4 images -test: # test images (optional) +path: ../datasets/coco8-seg # dataset root dir +train: images/train # train images (relative to 'path') 4 images +val: images/val # val images (relative to 'path') 4 images +test: # test images (optional) # Classes names: @@ -97,6 +96,5 @@ names: 78: hair drier 79: toothbrush - # Download script/URL (optional) download: https://ultralytics.com/assets/coco8-seg.zip diff --git a/ultralytics/cfg/datasets/coco8.yaml b/ultralytics/cfg/datasets/coco8.yaml index 97ac4a294..2925f8186 100644 --- a/ultralytics/cfg/datasets/coco8.yaml +++ b/ultralytics/cfg/datasets/coco8.yaml @@ -7,12 +7,11 @@ # └── datasets # └── coco8 ← downloads here (1 MB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/coco8 # dataset root dir -train: images/train # train images (relative to 'path') 4 images -val: images/val # val images (relative to 'path') 4 images -test: # test images (optional) +path: ../datasets/coco8 # dataset root dir +train: images/train # train images (relative to 'path') 4 images +val: images/val # val images (relative to 'path') 4 images +test: # test images (optional) # Classes names: @@ -97,6 +96,5 @@ names: 78: hair drier 79: toothbrush - # Download script/URL (optional) download: https://ultralytics.com/assets/coco8.zip diff --git a/ultralytics/cfg/datasets/dota8.yaml b/ultralytics/cfg/datasets/dota8.yaml index cbf6361ab..f58b501f2 100644 --- a/ultralytics/cfg/datasets/dota8.yaml +++ b/ultralytics/cfg/datasets/dota8.yaml @@ -8,9 +8,9 @@ # └── dota8 ← downloads here (1MB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/dota8 # dataset root dir -train: images/train # train images (relative to 'path') 4 images -val: images/val # val images (relative to 'path') 4 images +path: ../datasets/dota8 # dataset root dir +train: images/train # train images (relative to 'path') 4 images +val: images/val # val images (relative to 'path') 4 images # Classes for DOTA 1.0 names: diff --git a/ultralytics/cfg/datasets/open-images-v7.yaml b/ultralytics/cfg/datasets/open-images-v7.yaml index db44dabfc..d9cad9f1d 100644 --- a/ultralytics/cfg/datasets/open-images-v7.yaml +++ b/ultralytics/cfg/datasets/open-images-v7.yaml @@ -7,12 +7,11 @@ # └── datasets # └── open-images-v7 ← downloads here (561 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/open-images-v7 # dataset root dir -train: images/train # train images (relative to 'path') 1743042 images -val: images/val # val images (relative to 'path') 41620 images -test: # test images (optional) +path: ../datasets/open-images-v7 # dataset root dir +train: images/train # train images (relative to 'path') 1743042 images +val: images/val # val images (relative to 'path') 41620 images +test: # test images (optional) # Classes names: @@ -618,7 +617,6 @@ names: 599: Zebra 600: Zucchini - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from ultralytics.utils import LOGGER, SETTINGS, Path, is_ubuntu, get_ubuntu_version diff --git a/ultralytics/cfg/datasets/tiger-pose.yaml b/ultralytics/cfg/datasets/tiger-pose.yaml index 2d886c5eb..d37df04a5 100644 --- a/ultralytics/cfg/datasets/tiger-pose.yaml +++ b/ultralytics/cfg/datasets/tiger-pose.yaml @@ -7,14 +7,13 @@ # └── datasets # └── tiger-pose ← downloads here (75.3 MB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] -path: ../datasets/tiger-pose # dataset root dir -train: train # train images (relative to 'path') 210 images -val: val # val images (relative to 'path') 53 images +path: ../datasets/tiger-pose # dataset root dir +train: train # train images (relative to 'path') 210 images +val: val # val images (relative to 'path') 53 images # Keypoints -kpt_shape: [12, 2] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +kpt_shape: [12, 2] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) flip_idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] # Classes diff --git a/ultralytics/cfg/datasets/xView.yaml b/ultralytics/cfg/datasets/xView.yaml index 6886ceb03..d2e957ad5 100644 --- a/ultralytics/cfg/datasets/xView.yaml +++ b/ultralytics/cfg/datasets/xView.yaml @@ -8,11 +8,10 @@ # └── datasets # └── xView ← downloads here (20.7 GB) - # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
-path: ../datasets/xView # dataset root dir -train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images -val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 train images +path: ../datasets/xView # dataset root dir +train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images +val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 train images # Classes names: @@ -77,7 +76,6 @@ names: 58: Pylon 59: Tower - # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | import json diff --git a/ultralytics/cfg/default.yaml b/ultralytics/cfg/default.yaml index a05edb347..fa4b45a71 100644 --- a/ultralytics/cfg/default.yaml +++ b/ultralytics/cfg/default.yaml @@ -1,125 +1,125 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Default training settings and hyperparameters for medium-augmentation COCO training -task: detect # (str) YOLO task, i.e. detect, segment, classify, pose -mode: train # (str) YOLO mode, i.e. train, val, predict, export, track, benchmark +task: detect # (str) YOLO task, i.e. detect, segment, classify, pose +mode: train # (str) YOLO mode, i.e. train, val, predict, export, track, benchmark # Train settings ------------------------------------------------------------------------------------------------------- -model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml -data: # (str, optional) path to data file, i.e. coco128.yaml -epochs: 100 # (int) number of epochs to train for -time: # (float, optional) number of hours to train for, overrides epochs if supplied -patience: 50 # (int) epochs to wait for no observable improvement for early stopping of training -batch: 16 # (int) number of images per batch (-1 for AutoBatch) -imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes -save: True # (bool) save train checkpoints and predict results +model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml +data: # (str, optional) path to data file, i.e. coco128.yaml +epochs: 100 # (int) number of epochs to train for +time: # (float, optional) number of hours to train for, overrides epochs if supplied +patience: 50 # (int) epochs to wait for no observable improvement for early stopping of training +batch: 16 # (int) number of images per batch (-1 for AutoBatch) +imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes +save: True # (bool) save train checkpoints and predict results save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1) -cache: False # (bool) True/ram, disk or False. Use cache for data loading -device: # (int | str | list, optional) device to run on, i.e. 
cuda device=0 or device=0,1,2,3 or device=cpu -workers: 8 # (int) number of worker threads for data loading (per RANK if DDP) -project: # (str, optional) project name -name: # (str, optional) experiment name, results saved to 'project/name' directory -exist_ok: False # (bool) whether to overwrite existing experiment -pretrained: True # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str) -optimizer: auto # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto] -verbose: True # (bool) whether to print verbose output -seed: 0 # (int) random seed for reproducibility -deterministic: True # (bool) whether to enable deterministic mode -single_cls: False # (bool) train multi-class data as single-class -rect: False # (bool) rectangular training if mode='train' or rectangular validation if mode='val' -cos_lr: False # (bool) use cosine learning rate scheduler -close_mosaic: 10 # (int) disable mosaic augmentation for final epochs (0 to disable) -resume: False # (bool) resume training from last checkpoint -amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check -fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set) -profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers -freeze: None # (int | list, optional) freeze first n layers, or freeze list of layer indices during training -multi_scale: False # (bool) Whether to use multi-scale during training +cache: False # (bool) True/ram, disk or False. Use cache for data loading +device: # (int | str | list, optional) device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu +workers: 8 # (int) number of worker threads for data loading (per RANK if DDP) +project: # (str, optional) project name +name: # (str, optional) experiment name, results saved to 'project/name' directory +exist_ok: False # (bool) whether to overwrite existing experiment +pretrained: True # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str) +optimizer: auto # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto] +verbose: True # (bool) whether to print verbose output +seed: 0 # (int) random seed for reproducibility +deterministic: True # (bool) whether to enable deterministic mode +single_cls: False # (bool) train multi-class data as single-class +rect: False # (bool) rectangular training if mode='train' or rectangular validation if mode='val' +cos_lr: False # (bool) use cosine learning rate scheduler +close_mosaic: 10 # (int) disable mosaic augmentation for final epochs (0 to disable) +resume: False # (bool) resume training from last checkpoint +amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check +fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set) +profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers +freeze: None # (int | list, optional) freeze first n layers, or freeze list of layer indices during training +multi_scale: False # (bool) Whether to use multi-scale during training # Segmentation -overlap_mask: True # (bool) masks should overlap during training (segment train only) -mask_ratio: 4 # (int) mask downsample ratio (segment train only) +overlap_mask: True # (bool) masks should overlap during training (segment train only) +mask_ratio: 4 # (int) mask downsample ratio (segment train 
only) # Classification -dropout: 0.0 # (float) use dropout regularization (classify train only) +dropout: 0.0 # (float) use dropout regularization (classify train only) # Val/Test settings ---------------------------------------------------------------------------------------------------- -val: True # (bool) validate/test during training -split: val # (str) dataset split to use for validation, i.e. 'val', 'test' or 'train' -save_json: False # (bool) save results to JSON file -save_hybrid: False # (bool) save hybrid version of labels (labels + additional predictions) -conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val) -iou: 0.7 # (float) intersection over union (IoU) threshold for NMS -max_det: 300 # (int) maximum number of detections per image -half: False # (bool) use half precision (FP16) -dnn: False # (bool) use OpenCV DNN for ONNX inference -plots: True # (bool) save plots and images during train/val +val: True # (bool) validate/test during training +split: val # (str) dataset split to use for validation, i.e. 'val', 'test' or 'train' +save_json: False # (bool) save results to JSON file +save_hybrid: False # (bool) save hybrid version of labels (labels + additional predictions) +conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val) +iou: 0.7 # (float) intersection over union (IoU) threshold for NMS +max_det: 300 # (int) maximum number of detections per image +half: False # (bool) use half precision (FP16) +dnn: False # (bool) use OpenCV DNN for ONNX inference +plots: True # (bool) save plots and images during train/val # Predict settings ----------------------------------------------------------------------------------------------------- -source: # (str, optional) source directory for images or videos -vid_stride: 1 # (int) video frame-rate stride -stream_buffer: False # (bool) buffer all streaming frames (True) or return the most recent frame (False) -visualize: False # (bool) visualize model features -augment: False # (bool) apply image augmentation to prediction sources -agnostic_nms: False # (bool) class-agnostic NMS -classes: # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3] -retina_masks: False # (bool) use high-resolution segmentation masks -embed: # (list[int], optional) return feature vectors/embeddings from given layers +source: # (str, optional) source directory for images or videos +vid_stride: 1 # (int) video frame-rate stride +stream_buffer: False # (bool) buffer all streaming frames (True) or return the most recent frame (False) +visualize: False # (bool) visualize model features +augment: False # (bool) apply image augmentation to prediction sources +agnostic_nms: False # (bool) class-agnostic NMS +classes: # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3] +retina_masks: False # (bool) use high-resolution segmentation masks +embed: # (list[int], optional) return feature vectors/embeddings from given layers # Visualize settings --------------------------------------------------------------------------------------------------- -show: False # (bool) show predicted images and videos if environment allows -save_frames: False # (bool) save predicted individual video frames -save_txt: False # (bool) save results as .txt file -save_conf: False # (bool) save results with confidence scores -save_crop: False # (bool) save cropped images with results -show_labels: True # (bool) show prediction labels, i.e. 
'person' -show_conf: True # (bool) show prediction confidence, i.e. '0.99' -show_boxes: True # (bool) show prediction boxes -line_width: # (int, optional) line width of the bounding boxes. Scaled to image size if None. +show: False # (bool) show predicted images and videos if environment allows +save_frames: False # (bool) save predicted individual video frames +save_txt: False # (bool) save results as .txt file +save_conf: False # (bool) save results with confidence scores +save_crop: False # (bool) save cropped images with results +show_labels: True # (bool) show prediction labels, i.e. 'person' +show_conf: True # (bool) show prediction confidence, i.e. '0.99' +show_boxes: True # (bool) show prediction boxes +line_width: # (int, optional) line width of the bounding boxes. Scaled to image size if None. # Export settings ------------------------------------------------------------------------------------------------------ -format: torchscript # (str) format to export to, choices at https://docs.ultralytics.com/modes/export/#export-formats -keras: False # (bool) use Kera=s -optimize: False # (bool) TorchScript: optimize for mobile -int8: False # (bool) CoreML/TF INT8 quantization -dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes -simplify: False # (bool) ONNX: simplify model -opset: # (int, optional) ONNX: opset version -workspace: 4 # (int) TensorRT: workspace size (GB) -nms: False # (bool) CoreML: add NMS +format: torchscript # (str) format to export to, choices at https://docs.ultralytics.com/modes/export/#export-formats +keras: False # (bool) use Kera=s +optimize: False # (bool) TorchScript: optimize for mobile +int8: False # (bool) CoreML/TF INT8 quantization +dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes +simplify: False # (bool) ONNX: simplify model +opset: # (int, optional) ONNX: opset version +workspace: 4 # (int) TensorRT: workspace size (GB) +nms: False # (bool) CoreML: add NMS # Hyperparameters ------------------------------------------------------------------------------------------------------ -lr0: 0.01 # (float) initial learning rate (i.e. 
SGD=1E-2, Adam=1E-3) -lrf: 0.01 # (float) final learning rate (lr0 * lrf) -momentum: 0.937 # (float) SGD momentum/Adam beta1 -weight_decay: 0.0005 # (float) optimizer weight decay 5e-4 -warmup_epochs: 3.0 # (float) warmup epochs (fractions ok) -warmup_momentum: 0.8 # (float) warmup initial momentum -warmup_bias_lr: 0.1 # (float) warmup initial bias lr -box: 7.5 # (float) box loss gain -cls: 0.5 # (float) cls loss gain (scale with pixels) -dfl: 1.5 # (float) dfl loss gain -pose: 12.0 # (float) pose loss gain -kobj: 1.0 # (float) keypoint obj loss gain -label_smoothing: 0.0 # (float) label smoothing (fraction) -nbs: 64 # (int) nominal batch size -hsv_h: 0.015 # (float) image HSV-Hue augmentation (fraction) -hsv_s: 0.7 # (float) image HSV-Saturation augmentation (fraction) -hsv_v: 0.4 # (float) image HSV-Value augmentation (fraction) -degrees: 0.0 # (float) image rotation (+/- deg) -translate: 0.1 # (float) image translation (+/- fraction) -scale: 0.5 # (float) image scale (+/- gain) -shear: 0.0 # (float) image shear (+/- deg) -perspective: 0.0 # (float) image perspective (+/- fraction), range 0-0.001 -flipud: 0.0 # (float) image flip up-down (probability) -fliplr: 0.5 # (float) image flip left-right (probability) -mosaic: 1.0 # (float) image mosaic (probability) -mixup: 0.0 # (float) image mixup (probability) -copy_paste: 0.0 # (float) segment copy-paste (probability) -auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix) -erasing: 0.4 # (float) probability of random erasing during classification training (0-1) -crop_fraction: 1.0 # (float) image crop fraction for classification evaluation/inference (0-1) +lr0: 0.01 # (float) initial learning rate (i.e. SGD=1E-2, Adam=1E-3) +lrf: 0.01 # (float) final learning rate (lr0 * lrf) +momentum: 0.937 # (float) SGD momentum/Adam beta1 +weight_decay: 0.0005 # (float) optimizer weight decay 5e-4 +warmup_epochs: 3.0 # (float) warmup epochs (fractions ok) +warmup_momentum: 0.8 # (float) warmup initial momentum +warmup_bias_lr: 0.1 # (float) warmup initial bias lr +box: 7.5 # (float) box loss gain +cls: 0.5 # (float) cls loss gain (scale with pixels) +dfl: 1.5 # (float) dfl loss gain +pose: 12.0 # (float) pose loss gain +kobj: 1.0 # (float) keypoint obj loss gain +label_smoothing: 0.0 # (float) label smoothing (fraction) +nbs: 64 # (int) nominal batch size +hsv_h: 0.015 # (float) image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # (float) image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # (float) image HSV-Value augmentation (fraction) +degrees: 0.0 # (float) image rotation (+/- deg) +translate: 0.1 # (float) image translation (+/- fraction) +scale: 0.5 # (float) image scale (+/- gain) +shear: 0.0 # (float) image shear (+/- deg) +perspective: 0.0 # (float) image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # (float) image flip up-down (probability) +fliplr: 0.5 # (float) image flip left-right (probability) +mosaic: 1.0 # (float) image mosaic (probability) +mixup: 0.0 # (float) image mixup (probability) +copy_paste: 0.0 # (float) segment copy-paste (probability) +auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix) +erasing: 0.4 # (float) probability of random erasing during classification training (0-1) +crop_fraction: 1.0 # (float) image crop fraction for classification evaluation/inference (0-1) # Custom config.yaml --------------------------------------------------------------------------------------------------- 
-cfg: # (str, optional) for overriding defaults.yaml +cfg: # (str, optional) for overriding defaults.yaml # Tracker settings ------------------------------------------------------------------------------------------------------ -tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml] +tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml] diff --git a/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml b/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml index b7bb57fb8..c6eb0b3ea 100644 --- a/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +++ b/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml @@ -2,49 +2,49 @@ # RT-DETR-l object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/rtdetr # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' # [depth, width, max_channels] l: [1.00, 1.00, 1024] backbone: # [from, repeats, module, args] - - [-1, 1, HGStem, [32, 48]] # 0-P2/4 - - [-1, 6, HGBlock, [48, 128, 3]] # stage 1 + - [-1, 1, HGStem, [32, 48]] # 0-P2/4 + - [-1, 6, HGBlock, [48, 128, 3]] # stage 1 - - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8 - - [-1, 6, HGBlock, [96, 512, 3]] # stage 2 + - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8 + - [-1, 6, HGBlock, [96, 512, 3]] # stage 2 - - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 4-P3/16 - - [-1, 6, HGBlock, [192, 1024, 5, True, False]] # cm, c2, k, light, shortcut + - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 4-P3/16 + - [-1, 6, HGBlock, [192, 1024, 5, True, False]] # cm, c2, k, light, shortcut - [-1, 6, HGBlock, [192, 1024, 5, True, True]] - - [-1, 6, HGBlock, [192, 1024, 5, True, True]] # stage 3 + - [-1, 6, HGBlock, [192, 1024, 5, True, True]] # stage 3 - - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 8-P4/32 - - [-1, 6, HGBlock, [384, 2048, 5, True, False]] # stage 4 + - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 8-P4/32 + - [-1, 6, HGBlock, [384, 2048, 5, True, False]] # stage 4 head: - - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 10 input_proj.2 + - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 10 input_proj.2 - [-1, 1, AIFI, [1024, 8]] - - [-1, 1, Conv, [256, 1, 1]] # 12, Y5, lateral_convs.0 + - [-1, 1, Conv, [256, 1, 1]] # 12, Y5, lateral_convs.0 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [7, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 input_proj.1 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [7, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 input_proj.1 - [[-2, -1], 1, Concat, [1]] - - [-1, 3, RepC3, [256]] # 16, fpn_blocks.0 - - [-1, 1, Conv, [256, 1, 1]] # 17, Y4, lateral_convs.1 + - [-1, 3, RepC3, [256]] # 16, fpn_blocks.0 + - [-1, 1, Conv, [256, 1, 1]] # 17, Y4, lateral_convs.1 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 19 input_proj.0 - - [[-2, -1], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, RepC3, [256]] # X3 (21), fpn_blocks.1 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 19 input_proj.0 + - [[-2, -1], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, RepC3, [256]] # X3 (21), fpn_blocks.1 - - [-1, 1, Conv, [256, 3, 2]] # 22, downsample_convs.0 - - [[-1, 17], 1, Concat, [1]] # cat Y4 - - [-1, 3, RepC3, [256]] # F4 (24), pan_blocks.0 + - [-1, 1, Conv, [256, 3, 2]] # 22, downsample_convs.0 + - [[-1, 17], 1, Concat, [1]] # cat Y4 + - [-1, 3, RepC3, [256]] # F4 (24), pan_blocks.0 - - [-1, 1, Conv, [256, 
3, 2]] # 25, downsample_convs.1 - - [[-1, 12], 1, Concat, [1]] # cat Y5 - - [-1, 3, RepC3, [256]] # F5 (27), pan_blocks.1 + - [-1, 1, Conv, [256, 3, 2]] # 25, downsample_convs.1 + - [[-1, 12], 1, Concat, [1]] # cat Y5 + - [-1, 3, RepC3, [256]] # F5 (27), pan_blocks.1 - - [[21, 24, 27], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) + - [[21, 24, 27], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml b/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml index 949a7e6ad..a68bb5dda 100644 --- a/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +++ b/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml @@ -2,41 +2,41 @@ # RT-DETR-ResNet101 object detection model with P3-P5 outputs. # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' # [depth, width, max_channels] l: [1.00, 1.00, 1024] backbone: # [from, repeats, module, args] - - [-1, 1, ResNetLayer, [3, 64, 1, True, 1]] # 0 - - [-1, 1, ResNetLayer, [64, 64, 1, False, 3]] # 1 - - [-1, 1, ResNetLayer, [256, 128, 2, False, 4]] # 2 - - [-1, 1, ResNetLayer, [512, 256, 2, False, 23]] # 3 - - [-1, 1, ResNetLayer, [1024, 512, 2, False, 3]] # 4 + - [-1, 1, ResNetLayer, [3, 64, 1, True, 1]] # 0 + - [-1, 1, ResNetLayer, [64, 64, 1, False, 3]] # 1 + - [-1, 1, ResNetLayer, [256, 128, 2, False, 4]] # 2 + - [-1, 1, ResNetLayer, [512, 256, 2, False, 23]] # 3 + - [-1, 1, ResNetLayer, [1024, 512, 2, False, 3]] # 4 head: - - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 5 + - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 5 - [-1, 1, AIFI, [1024, 8]] - - [-1, 1, Conv, [256, 1, 1]] # 7 + - [-1, 1, Conv, [256, 1, 1]] # 7 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 9 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 9 - [[-2, -1], 1, Concat, [1]] - - [-1, 3, RepC3, [256]] # 11 - - [-1, 1, Conv, [256, 1, 1]] # 12 + - [-1, 3, RepC3, [256]] # 11 + - [-1, 1, Conv, [256, 1, 1]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [2, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 - - [[-2, -1], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, RepC3, [256]] # X3 (16), fpn_blocks.1 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [2, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 + - [[-2, -1], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, RepC3, [256]] # X3 (16), fpn_blocks.1 - - [-1, 1, Conv, [256, 3, 2]] # 17, downsample_convs.0 - - [[-1, 12], 1, Concat, [1]] # cat Y4 - - [-1, 3, RepC3, [256]] # F4 (19), pan_blocks.0 + - [-1, 1, Conv, [256, 3, 2]] # 17, downsample_convs.0 + - [[-1, 12], 1, Concat, [1]] # cat Y4 + - [-1, 3, RepC3, [256]] # F4 (19), pan_blocks.0 - - [-1, 1, Conv, [256, 3, 2]] # 20, downsample_convs.1 - - [[-1, 7], 1, Concat, [1]] # cat Y5 - - [-1, 3, RepC3, [256]] # F5 (22), pan_blocks.1 + - [-1, 1, Conv, [256, 3, 2]] # 20, downsample_convs.1 + - [[-1, 7], 1, Concat, [1]] # cat Y5 + - [-1, 3, RepC3, [256]] # F5 (22), pan_blocks.1 - - [[16, 19, 22], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) + - [[16, 19, 22], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml b/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml index bd1228d17..714591041 100644 --- a/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +++ b/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml @@ -2,41 +2,41 @@ # RT-DETR-ResNet50 
object detection model with P3-P5 outputs. # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' # [depth, width, max_channels] l: [1.00, 1.00, 1024] backbone: # [from, repeats, module, args] - - [-1, 1, ResNetLayer, [3, 64, 1, True, 1]] # 0 - - [-1, 1, ResNetLayer, [64, 64, 1, False, 3]] # 1 - - [-1, 1, ResNetLayer, [256, 128, 2, False, 4]] # 2 - - [-1, 1, ResNetLayer, [512, 256, 2, False, 6]] # 3 - - [-1, 1, ResNetLayer, [1024, 512, 2, False, 3]] # 4 + - [-1, 1, ResNetLayer, [3, 64, 1, True, 1]] # 0 + - [-1, 1, ResNetLayer, [64, 64, 1, False, 3]] # 1 + - [-1, 1, ResNetLayer, [256, 128, 2, False, 4]] # 2 + - [-1, 1, ResNetLayer, [512, 256, 2, False, 6]] # 3 + - [-1, 1, ResNetLayer, [1024, 512, 2, False, 3]] # 4 head: - - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 5 + - [-1, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 5 - [-1, 1, AIFI, [1024, 8]] - - [-1, 1, Conv, [256, 1, 1]] # 7 + - [-1, 1, Conv, [256, 1, 1]] # 7 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 9 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [3, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 9 - [[-2, -1], 1, Concat, [1]] - - [-1, 3, RepC3, [256]] # 11 - - [-1, 1, Conv, [256, 1, 1]] # 12 + - [-1, 3, RepC3, [256]] # 11 + - [-1, 1, Conv, [256, 1, 1]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [2, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 - - [[-2, -1], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, RepC3, [256]] # X3 (16), fpn_blocks.1 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [2, 1, Conv, [256, 1, 1, None, 1, 1, False]] # 14 + - [[-2, -1], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, RepC3, [256]] # X3 (16), fpn_blocks.1 - - [-1, 1, Conv, [256, 3, 2]] # 17, downsample_convs.0 - - [[-1, 12], 1, Concat, [1]] # cat Y4 - - [-1, 3, RepC3, [256]] # F4 (19), pan_blocks.0 + - [-1, 1, Conv, [256, 3, 2]] # 17, downsample_convs.0 + - [[-1, 12], 1, Concat, [1]] # cat Y4 + - [-1, 3, RepC3, [256]] # F4 (19), pan_blocks.0 - - [-1, 1, Conv, [256, 3, 2]] # 20, downsample_convs.1 - - [[-1, 7], 1, Concat, [1]] # cat Y5 - - [-1, 3, RepC3, [256]] # F5 (22), pan_blocks.1 + - [-1, 1, Conv, [256, 3, 2]] # 20, downsample_convs.1 + - [[-1, 7], 1, Concat, [1]] # cat Y5 + - [-1, 3, RepC3, [256]] # F5 (22), pan_blocks.1 - - [[16, 19, 22], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) + - [[16, 19, 22], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml b/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml index 2894bc0d9..0e819b0a0 100644 --- a/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +++ b/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml @@ -2,53 +2,53 @@ # RT-DETR-x object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/rtdetr # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 
'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' # [depth, width, max_channels] x: [1.00, 1.00, 2048] backbone: # [from, repeats, module, args] - - [-1, 1, HGStem, [32, 64]] # 0-P2/4 - - [-1, 6, HGBlock, [64, 128, 3]] # stage 1 + - [-1, 1, HGStem, [32, 64]] # 0-P2/4 + - [-1, 6, HGBlock, [64, 128, 3]] # stage 1 - - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8 + - [-1, 1, DWConv, [128, 3, 2, 1, False]] # 2-P3/8 - [-1, 6, HGBlock, [128, 512, 3]] - - [-1, 6, HGBlock, [128, 512, 3, False, True]] # 4-stage 2 + - [-1, 6, HGBlock, [128, 512, 3, False, True]] # 4-stage 2 - - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 5-P3/16 - - [-1, 6, HGBlock, [256, 1024, 5, True, False]] # cm, c2, k, light, shortcut + - [-1, 1, DWConv, [512, 3, 2, 1, False]] # 5-P3/16 + - [-1, 6, HGBlock, [256, 1024, 5, True, False]] # cm, c2, k, light, shortcut - [-1, 6, HGBlock, [256, 1024, 5, True, True]] - [-1, 6, HGBlock, [256, 1024, 5, True, True]] - [-1, 6, HGBlock, [256, 1024, 5, True, True]] - - [-1, 6, HGBlock, [256, 1024, 5, True, True]] # 10-stage 3 + - [-1, 6, HGBlock, [256, 1024, 5, True, True]] # 10-stage 3 - - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 11-P4/32 + - [-1, 1, DWConv, [1024, 3, 2, 1, False]] # 11-P4/32 - [-1, 6, HGBlock, [512, 2048, 5, True, False]] - - [-1, 6, HGBlock, [512, 2048, 5, True, True]] # 13-stage 4 + - [-1, 6, HGBlock, [512, 2048, 5, True, True]] # 13-stage 4 head: - - [-1, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 14 input_proj.2 + - [-1, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 14 input_proj.2 - [-1, 1, AIFI, [2048, 8]] - - [-1, 1, Conv, [384, 1, 1]] # 16, Y5, lateral_convs.0 + - [-1, 1, Conv, [384, 1, 1]] # 16, Y5, lateral_convs.0 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [10, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 18 input_proj.1 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [10, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 18 input_proj.1 - [[-2, -1], 1, Concat, [1]] - - [-1, 3, RepC3, [384]] # 20, fpn_blocks.0 - - [-1, 1, Conv, [384, 1, 1]] # 21, Y4, lateral_convs.1 + - [-1, 3, RepC3, [384]] # 20, fpn_blocks.0 + - [-1, 1, Conv, [384, 1, 1]] # 21, Y4, lateral_convs.1 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [4, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 23 input_proj.0 - - [[-2, -1], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, RepC3, [384]] # X3 (25), fpn_blocks.1 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [4, 1, Conv, [384, 1, 1, None, 1, 1, False]] # 23 input_proj.0 + - [[-2, -1], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, RepC3, [384]] # X3 (25), fpn_blocks.1 - - [-1, 1, Conv, [384, 3, 2]] # 26, downsample_convs.0 - - [[-1, 21], 1, Concat, [1]] # cat Y4 - - [-1, 3, RepC3, [384]] # F4 (28), pan_blocks.0 + - [-1, 1, Conv, [384, 3, 2]] # 26, downsample_convs.0 + - [[-1, 21], 1, Concat, [1]] # cat Y4 + - [-1, 3, RepC3, [384]] # F4 (28), pan_blocks.0 - - [-1, 1, Conv, [384, 3, 2]] # 29, downsample_convs.1 - - [[-1, 16], 1, Concat, [1]] # cat Y5 - - [-1, 3, RepC3, [384]] # F5 (31), pan_blocks.1 + - [-1, 1, Conv, [384, 3, 2]] # 29, downsample_convs.1 + - [[-1, 16], 1, Concat, [1]] # cat Y5 + - [-1, 3, RepC3, [384]] # F5 (31), pan_blocks.1 - - [[25, 28, 31], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) + - [[25, 28, 31], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v3/yolov3-spp.yaml b/ultralytics/cfg/models/v3/yolov3-spp.yaml index aec1bb202..6724f4e9f 100644 --- a/ultralytics/cfg/models/v3/yolov3-spp.yaml +++ b/ultralytics/cfg/models/v3/yolov3-spp.yaml @@ -2,24 +2,24 @@ # YOLOv3-SPP object 
detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/yolov3 # Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple # darknet53 backbone backbone: # [from, number, module, args] - - [-1, 1, Conv, [32, 3, 1]] # 0 - - [-1, 1, Conv, [64, 3, 2]] # 1-P1/2 + - [-1, 1, Conv, [32, 3, 1]] # 0 + - [-1, 1, Conv, [64, 3, 2]] # 1-P1/2 - [-1, 1, Bottleneck, [64]] - - [-1, 1, Conv, [128, 3, 2]] # 3-P2/4 + - [-1, 1, Conv, [128, 3, 2]] # 3-P2/4 - [-1, 2, Bottleneck, [128]] - - [-1, 1, Conv, [256, 3, 2]] # 5-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 5-P3/8 - [-1, 8, Bottleneck, [256]] - - [-1, 1, Conv, [512, 3, 2]] # 7-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 7-P4/16 - [-1, 8, Bottleneck, [512]] - - [-1, 1, Conv, [1024, 3, 2]] # 9-P5/32 - - [-1, 4, Bottleneck, [1024]] # 10 + - [-1, 1, Conv, [1024, 3, 2]] # 9-P5/32 + - [-1, 4, Bottleneck, [1024]] # 10 # YOLOv3-SPP head head: @@ -27,20 +27,20 @@ head: - [-1, 1, SPP, [512, [5, 9, 13]]] - [-1, 1, Conv, [1024, 3, 1]] - [-1, 1, Conv, [512, 1, 1]] - - [-1, 1, Conv, [1024, 3, 1]] # 15 (P5/32-large) + - [-1, 1, Conv, [1024, 3, 1]] # 15 (P5/32-large) - [-2, 1, Conv, [256, 1, 1]] - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 8], 1, Concat, [1]] # cat backbone P4 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P4 - [-1, 1, Bottleneck, [512, False]] - [-1, 1, Bottleneck, [512, False]] - [-1, 1, Conv, [256, 1, 1]] - - [-1, 1, Conv, [512, 3, 1]] # 22 (P4/16-medium) + - [-1, 1, Conv, [512, 3, 1]] # 22 (P4/16-medium) - [-2, 1, Conv, [128, 1, 1]] - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P3 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P3 - [-1, 1, Bottleneck, [256, False]] - - [-1, 2, Bottleneck, [256, False]] # 27 (P3/8-small) + - [-1, 2, Bottleneck, [256, False]] # 27 (P3/8-small) - - [[27, 22, 15], 1, Detect, [nc]] # Detect(P3, P4, P5) + - [[27, 22, 15], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v3/yolov3-tiny.yaml b/ultralytics/cfg/models/v3/yolov3-tiny.yaml index 8330e4924..f3fe25780 100644 --- a/ultralytics/cfg/models/v3/yolov3-tiny.yaml +++ b/ultralytics/cfg/models/v3/yolov3-tiny.yaml @@ -2,36 +2,36 @@ # YOLOv3-tiny object detection model with P4-P5 outputs. 
For details see https://docs.ultralytics.com/models/yolov3 # Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple # YOLOv3-tiny backbone backbone: # [from, number, module, args] - - [-1, 1, Conv, [16, 3, 1]] # 0 - - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 1-P1/2 + - [-1, 1, Conv, [16, 3, 1]] # 0 + - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 1-P1/2 - [-1, 1, Conv, [32, 3, 1]] - - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 3-P2/4 + - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 3-P2/4 - [-1, 1, Conv, [64, 3, 1]] - - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 5-P3/8 + - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 5-P3/8 - [-1, 1, Conv, [128, 3, 1]] - - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 7-P4/16 + - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 7-P4/16 - [-1, 1, Conv, [256, 3, 1]] - - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 9-P5/32 + - [-1, 1, nn.MaxPool2d, [2, 2, 0]] # 9-P5/32 - [-1, 1, Conv, [512, 3, 1]] - - [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]] # 11 - - [-1, 1, nn.MaxPool2d, [2, 1, 0]] # 12 + - [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]] # 11 + - [-1, 1, nn.MaxPool2d, [2, 1, 0]] # 12 # YOLOv3-tiny head head: - [-1, 1, Conv, [1024, 3, 1]] - [-1, 1, Conv, [256, 1, 1]] - - [-1, 1, Conv, [512, 3, 1]] # 15 (P5/32-large) + - [-1, 1, Conv, [512, 3, 1]] # 15 (P5/32-large) - [-2, 1, Conv, [128, 1, 1]] - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 8], 1, Concat, [1]] # cat backbone P4 - - [-1, 1, Conv, [256, 3, 1]] # 19 (P4/16-medium) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P4 + - [-1, 1, Conv, [256, 3, 1]] # 19 (P4/16-medium) - - [[19, 15], 1, Detect, [nc]] # Detect(P4, P5) + - [[19, 15], 1, Detect, [nc]] # Detect(P4, P5) diff --git a/ultralytics/cfg/models/v3/yolov3.yaml b/ultralytics/cfg/models/v3/yolov3.yaml index f03706fc2..716866a97 100644 --- a/ultralytics/cfg/models/v3/yolov3.yaml +++ b/ultralytics/cfg/models/v3/yolov3.yaml @@ -2,24 +2,24 @@ # YOLOv3 object detection model with P3-P5 outputs. 
For details see https://docs.ultralytics.com/models/yolov3 # Parameters -nc: 80 # number of classes -depth_multiple: 1.0 # model depth multiple -width_multiple: 1.0 # layer channel multiple +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple # darknet53 backbone backbone: # [from, number, module, args] - - [-1, 1, Conv, [32, 3, 1]] # 0 - - [-1, 1, Conv, [64, 3, 2]] # 1-P1/2 + - [-1, 1, Conv, [32, 3, 1]] # 0 + - [-1, 1, Conv, [64, 3, 2]] # 1-P1/2 - [-1, 1, Bottleneck, [64]] - - [-1, 1, Conv, [128, 3, 2]] # 3-P2/4 + - [-1, 1, Conv, [128, 3, 2]] # 3-P2/4 - [-1, 2, Bottleneck, [128]] - - [-1, 1, Conv, [256, 3, 2]] # 5-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 5-P3/8 - [-1, 8, Bottleneck, [256]] - - [-1, 1, Conv, [512, 3, 2]] # 7-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 7-P4/16 - [-1, 8, Bottleneck, [512]] - - [-1, 1, Conv, [1024, 3, 2]] # 9-P5/32 - - [-1, 4, Bottleneck, [1024]] # 10 + - [-1, 1, Conv, [1024, 3, 2]] # 9-P5/32 + - [-1, 4, Bottleneck, [1024]] # 10 # YOLOv3 head head: @@ -27,20 +27,20 @@ head: - [-1, 1, Conv, [512, 1, 1]] - [-1, 1, Conv, [1024, 3, 1]] - [-1, 1, Conv, [512, 1, 1]] - - [-1, 1, Conv, [1024, 3, 1]] # 15 (P5/32-large) + - [-1, 1, Conv, [1024, 3, 1]] # 15 (P5/32-large) - [-2, 1, Conv, [256, 1, 1]] - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 8], 1, Concat, [1]] # cat backbone P4 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P4 - [-1, 1, Bottleneck, [512, False]] - [-1, 1, Bottleneck, [512, False]] - [-1, 1, Conv, [256, 1, 1]] - - [-1, 1, Conv, [512, 3, 1]] # 22 (P4/16-medium) + - [-1, 1, Conv, [512, 3, 1]] # 22 (P4/16-medium) - [-2, 1, Conv, [128, 1, 1]] - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P3 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P3 - [-1, 1, Bottleneck, [256, False]] - - [-1, 2, Bottleneck, [256, False]] # 27 (P3/8-small) + - [-1, 2, Bottleneck, [256, False]] # 27 (P3/8-small) - - [[27, 22, 15], 1, Detect, [nc]] # Detect(P3, P4, P5) + - [[27, 22, 15], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v5/yolov5-p6.yaml b/ultralytics/cfg/models/v5/yolov5-p6.yaml index 13df45865..2fd3ac71b 100644 --- a/ultralytics/cfg/models/v5/yolov5-p6.yaml +++ b/ultralytics/cfg/models/v5/yolov5-p6.yaml @@ -2,7 +2,7 @@ # YOLOv5 object detection model with P3-P6 outputs. For details see https://docs.ultralytics.com/models/yolov5 # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov5n-p6.yaml' will call yolov5-p6.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -14,46 +14,46 @@ scales: # model compound scaling constants, i.e. 
'model=yolov5n-p6.yaml' will ca # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - - [-1, 1, Conv, [64, 6, 2, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 6, 2, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C3, [128]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C3, [256]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 9, C3, [512]] - - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 - [-1, 3, C3, [768]] - - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 + - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 - [-1, 3, C3, [1024]] - - [-1, 1, SPPF, [1024, 5]] # 11 + - [-1, 1, SPPF, [1024, 5]] # 11 # YOLOv5 v6.0 head head: - [-1, 1, Conv, [768, 1, 1]] - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 8], 1, Concat, [1]] # cat backbone P5 - - [-1, 3, C3, [768, False]] # 15 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P5 + - [-1, 3, C3, [768, False]] # 15 - [-1, 1, Conv, [512, 1, 1]] - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C3, [512, False]] # 19 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C3, [512, False]] # 19 - [-1, 1, Conv, [256, 1, 1]] - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C3, [256, False]] # 23 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C3, [256, False]] # 23 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 20], 1, Concat, [1]] # cat head P4 - - [-1, 3, C3, [512, False]] # 26 (P4/16-medium) + - [[-1, 20], 1, Concat, [1]] # cat head P4 + - [-1, 3, C3, [512, False]] # 26 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 16], 1, Concat, [1]] # cat head P5 - - [-1, 3, C3, [768, False]] # 29 (P5/32-large) + - [[-1, 16], 1, Concat, [1]] # cat head P5 + - [-1, 3, C3, [768, False]] # 29 (P5/32-large) - [-1, 1, Conv, [768, 3, 2]] - - [[-1, 12], 1, Concat, [1]] # cat head P6 - - [-1, 3, C3, [1024, False]] # 32 (P6/64-xlarge) + - [[-1, 12], 1, Concat, [1]] # cat head P6 + - [-1, 3, C3, [1024, False]] # 32 (P6/64-xlarge) - - [[23, 26, 29, 32], 1, Detect, [nc]] # Detect(P3, P4, P5, P6) + - [[23, 26, 29, 32], 1, Detect, [nc]] # Detect(P3, P4, P5, P6) diff --git a/ultralytics/cfg/models/v5/yolov5.yaml b/ultralytics/cfg/models/v5/yolov5.yaml index f024065eb..8fdc79ebf 100644 --- a/ultralytics/cfg/models/v5/yolov5.yaml +++ b/ultralytics/cfg/models/v5/yolov5.yaml @@ -2,7 +2,7 @@ # YOLOv5 object detection model with P3-P5 outputs. For details see https://docs.ultralytics.com/models/yolov5 # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov5n.yaml' will call yolov5.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -14,36 +14,35 @@ scales: # model compound scaling constants, i.e. 
'model=yolov5n.yaml' will call # YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - - [-1, 1, Conv, [64, 6, 2, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 6, 2, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C3, [128]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C3, [256]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 9, C3, [512]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C3, [1024]] - - [-1, 1, SPPF, [1024, 5]] # 9 - + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv5 v6.0 head head: - [-1, 1, Conv, [512, 1, 1]] - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C3, [512, False]] # 13 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C3, [512, False]] # 13 - [-1, 1, Conv, [256, 1, 1]] - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C3, [256, False]] # 17 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C3, [256, False]] # 17 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 14], 1, Concat, [1]] # cat head P4 - - [-1, 3, C3, [512, False]] # 20 (P4/16-medium) + - [[-1, 14], 1, Concat, [1]] # cat head P4 + - [-1, 3, C3, [512, False]] # 20 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 10], 1, Concat, [1]] # cat head P5 - - [-1, 3, C3, [1024, False]] # 23 (P5/32-large) + - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [-1, 3, C3, [1024, False]] # 23 (P5/32-large) - - [[17, 20, 23], 1, Detect, [nc]] # Detect(P3, P4, P5) + - [[17, 20, 23], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v6/yolov6.yaml b/ultralytics/cfg/models/v6/yolov6.yaml index cb5e32ac3..f39dfb492 100644 --- a/ultralytics/cfg/models/v6/yolov6.yaml +++ b/ultralytics/cfg/models/v6/yolov6.yaml @@ -2,8 +2,8 @@ # YOLOv6 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/models/yolov6 # Parameters -nc: 80 # number of classes -activation: nn.ReLU() # (optional) model default activation function +nc: 80 # number of classes +activation: nn.ReLU() # (optional) model default activation function scales: # model compound scaling constants, i.e. 'model=yolov6n.yaml' will call yolov8.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -15,39 +15,39 @@ scales: # model compound scaling constants, i.e. 
'model=yolov6n.yaml' will call # YOLOv6-3.0s backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 6, Conv, [128, 3, 1]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 12, Conv, [256, 3, 1]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 18, Conv, [512, 3, 1]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 6, Conv, [1024, 3, 1]] - - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv6-3.0s head head: - [-1, 1, Conv, [256, 1, 1]] - [-1, 1, nn.ConvTranspose2d, [256, 2, 2, 0]] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - [-1, 1, Conv, [256, 3, 1]] - - [-1, 9, Conv, [256, 3, 1]] # 14 + - [-1, 9, Conv, [256, 3, 1]] # 14 - [-1, 1, Conv, [128, 1, 1]] - [-1, 1, nn.ConvTranspose2d, [128, 2, 2, 0]] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - [-1, 1, Conv, [128, 3, 1]] - - [-1, 9, Conv, [128, 3, 1]] # 19 + - [-1, 9, Conv, [128, 3, 1]] # 19 - [-1, 1, Conv, [128, 3, 2]] - - [[-1, 15], 1, Concat, [1]] # cat head P4 + - [[-1, 15], 1, Concat, [1]] # cat head P4 - [-1, 1, Conv, [256, 3, 1]] - - [-1, 9, Conv, [256, 3, 1]] # 23 + - [-1, 9, Conv, [256, 3, 1]] # 23 - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 10], 1, Concat, [1]] # cat head P5 + - [[-1, 10], 1, Concat, [1]] # cat head P5 - [-1, 1, Conv, [512, 3, 1]] - - [-1, 9, Conv, [512, 3, 1]] # 27 + - [-1, 9, Conv, [512, 3, 1]] # 27 - - [[19, 23, 27], 1, Detect, [nc]] # Detect(P3, P4, P5) + - [[19, 23, 27], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v8/yolov8-cls.yaml b/ultralytics/cfg/models/v8/yolov8-cls.yaml index 5332f1d64..180fc65a5 100644 --- a/ultralytics/cfg/models/v8/yolov8-cls.yaml +++ b/ultralytics/cfg/models/v8/yolov8-cls.yaml @@ -2,7 +2,7 @@ # YOLOv8-cls image classification model. For Usage examples see https://docs.ultralytics.com/tasks/classify # Parameters -nc: 1000 # number of classes +nc: 1000 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -14,16 +14,16 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will c # YOLOv8.0n backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [1024, True]] # YOLOv8.0n head head: - - [-1, 1, Classify, [nc]] # Classify + - [-1, 1, Classify, [nc]] # Classify diff --git a/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml b/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml index 2295f122d..aee209349 100644 --- a/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +++ b/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml @@ -2,53 +2,53 @@ # YOLOv8 object detection model with P2-P5 outputs. 
For Usage examples see https://docs.ultralytics.com/tasks/detect # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] # YOLOv8n-ghost-p2 summary: 491 layers, 2033944 parameters, 2033928 gradients, 13.8 GFLOPs s: [0.33, 0.50, 1024] # YOLOv8s-ghost-p2 summary: 491 layers, 5562080 parameters, 5562064 gradients, 25.1 GFLOPs - m: [0.67, 0.75, 768] # YOLOv8m-ghost-p2 summary: 731 layers, 9031728 parameters, 9031712 gradients, 42.8 GFLOPs - l: [1.00, 1.00, 512] # YOLOv8l-ghost-p2 summary: 971 layers, 12214448 parameters, 12214432 gradients, 69.1 GFLOPs - x: [1.00, 1.25, 512] # YOLOv8x-ghost-p2 summary: 971 layers, 18664776 parameters, 18664760 gradients, 103.3 GFLOPs + m: [0.67, 0.75, 768] # YOLOv8m-ghost-p2 summary: 731 layers, 9031728 parameters, 9031712 gradients, 42.8 GFLOPs + l: [1.00, 1.00, 512] # YOLOv8l-ghost-p2 summary: 971 layers, 12214448 parameters, 12214432 gradients, 69.1 GFLOPs + x: [1.00, 1.25, 512] # YOLOv8x-ghost-p2 summary: 971 layers, 18664776 parameters, 18664760 gradients, 103.3 GFLOPs # YOLOv8.0-ghost backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, GhostConv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, GhostConv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C3Ghost, [128, True]] - - [-1, 1, GhostConv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, GhostConv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C3Ghost, [256, True]] - - [-1, 1, GhostConv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, GhostConv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C3Ghost, [512, True]] - - [-1, 1, GhostConv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, GhostConv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C3Ghost, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv8.0-ghost-p2 head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C3Ghost, [512]] # 12 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C3Ghost, [512]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C3Ghost, [256]] # 15 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C3Ghost, [256]] # 15 (P3/8-small) - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 2], 1, Concat, [1]] # cat backbone P2 - - [-1, 3, C3Ghost, [128]] # 18 (P2/4-xsmall) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 2], 1, Concat, [1]] # cat backbone P2 + - [-1, 3, C3Ghost, [128]] # 18 (P2/4-xsmall) - [-1, 1, GhostConv, [128, 3, 2]] - - [[-1, 15], 1, Concat, [1]] # cat head P3 - - [-1, 3, C3Ghost, [256]] # 21 (P3/8-small) + - [[-1, 15], 1, Concat, [1]] # cat head P3 + - [-1, 3, C3Ghost, [256]] # 21 (P3/8-small) - [-1, 1, GhostConv, [256, 3, 2]] - - [[-1, 12], 1, Concat, [1]] # cat head P4 - - [-1, 3, C3Ghost, [512]] # 24 (P4/16-medium) + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C3Ghost, [512]] # 24 (P4/16-medium) - [-1, 1, GhostConv, [512, 3, 2]] - - [[-1, 9], 1, Concat, [1]] # cat head P5 - - [-1, 3, C3Ghost, [1024]] # 27 (P5/32-large) + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C3Ghost, [1024]] # 27 (P5/32-large) - - [[18, 21, 24, 27], 1, Detect, [nc]] # Detect(P2, P3, P4, P5) + - [[18, 21, 24, 27], 1, Detect, [nc]] # Detect(P2, P3, P4, P5) diff --git 
a/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml b/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml index 21f5b820a..b35f4cdb6 100644 --- a/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +++ b/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml @@ -2,55 +2,55 @@ # YOLOv8 object detection model with P3-P6 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] # YOLOv8n-ghost-p6 summary: 529 layers, 2901100 parameters, 2901084 gradients, 5.8 GFLOPs s: [0.33, 0.50, 1024] # YOLOv8s-ghost-p6 summary: 529 layers, 9520008 parameters, 9519992 gradients, 16.4 GFLOPs - m: [0.67, 0.75, 768] # YOLOv8m-ghost-p6 summary: 789 layers, 18002904 parameters, 18002888 gradients, 34.4 GFLOPs - l: [1.00, 1.00, 512] # YOLOv8l-ghost-p6 summary: 1049 layers, 21227584 parameters, 21227568 gradients, 55.3 GFLOPs - x: [1.00, 1.25, 512] # YOLOv8x-ghost-p6 summary: 1049 layers, 33057852 parameters, 33057836 gradients, 85.7 GFLOPs + m: [0.67, 0.75, 768] # YOLOv8m-ghost-p6 summary: 789 layers, 18002904 parameters, 18002888 gradients, 34.4 GFLOPs + l: [1.00, 1.00, 512] # YOLOv8l-ghost-p6 summary: 1049 layers, 21227584 parameters, 21227568 gradients, 55.3 GFLOPs + x: [1.00, 1.25, 512] # YOLOv8x-ghost-p6 summary: 1049 layers, 33057852 parameters, 33057836 gradients, 85.7 GFLOPs # YOLOv8.0-ghost backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, GhostConv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, GhostConv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C3Ghost, [128, True]] - - [-1, 1, GhostConv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, GhostConv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C3Ghost, [256, True]] - - [-1, 1, GhostConv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, GhostConv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C3Ghost, [512, True]] - - [-1, 1, GhostConv, [768, 3, 2]] # 7-P5/32 + - [-1, 1, GhostConv, [768, 3, 2]] # 7-P5/32 - [-1, 3, C3Ghost, [768, True]] - - [-1, 1, GhostConv, [1024, 3, 2]] # 9-P6/64 + - [-1, 1, GhostConv, [1024, 3, 2]] # 9-P6/64 - [-1, 3, C3Ghost, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 11 + - [-1, 1, SPPF, [1024, 5]] # 11 # YOLOv8.0-ghost-p6 head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 8], 1, Concat, [1]] # cat backbone P5 - - [-1, 3, C3Ghost, [768]] # 14 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P5 + - [-1, 3, C3Ghost, [768]] # 14 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C3Ghost, [512]] # 17 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C3Ghost, [512]] # 17 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C3Ghost, [256]] # 20 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C3Ghost, [256]] # 20 (P3/8-small) - [-1, 1, GhostConv, [256, 3, 2]] - - [[-1, 17], 1, Concat, [1]] # cat head P4 - - [-1, 3, C3Ghost, [512]] # 23 (P4/16-medium) + - [[-1, 17], 1, Concat, [1]] # cat head P4 + - [-1, 3, C3Ghost, [512]] # 23 (P4/16-medium) - [-1, 1, GhostConv, [512, 3, 2]] - - [[-1, 14], 1, Concat, [1]] # cat head P5 - - [-1, 3, C3Ghost, [768]] # 26 (P5/32-large) + - [[-1, 14], 1, Concat, [1]] # cat 
head P5 + - [-1, 3, C3Ghost, [768]] # 26 (P5/32-large) - [-1, 1, GhostConv, [768, 3, 2]] - - [[-1, 11], 1, Concat, [1]] # cat head P6 - - [-1, 3, C3Ghost, [1024]] # 29 (P6/64-xlarge) + - [[-1, 11], 1, Concat, [1]] # cat head P6 + - [-1, 3, C3Ghost, [1024]] # 29 (P6/64-xlarge) - - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6) + - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6) diff --git a/ultralytics/cfg/models/v8/yolov8-ghost.yaml b/ultralytics/cfg/models/v8/yolov8-ghost.yaml index c997fd106..adc180232 100644 --- a/ultralytics/cfg/models/v8/yolov8-ghost.yaml +++ b/ultralytics/cfg/models/v8/yolov8-ghost.yaml @@ -3,45 +3,45 @@ # Employs Ghost convolutions and modules proposed in Huawei's GhostNet in https://arxiv.org/abs/1911.11907v2 # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' # [depth, width, max_channels] - n: [0.33, 0.25, 1024] # YOLOv8n-ghost summary: 403 layers, 1865316 parameters, 1865300 gradients, 5.8 GFLOPs - s: [0.33, 0.50, 1024] # YOLOv8s-ghost summary: 403 layers, 5960072 parameters, 5960056 gradients, 16.4 GFLOPs - m: [0.67, 0.75, 768] # YOLOv8m-ghost summary: 603 layers, 10336312 parameters, 10336296 gradients, 32.7 GFLOPs - l: [1.00, 1.00, 512] # YOLOv8l-ghost summary: 803 layers, 14277872 parameters, 14277856 gradients, 53.7 GFLOPs - x: [1.00, 1.25, 512] # YOLOv8x-ghost summary: 803 layers, 22229308 parameters, 22229292 gradients, 83.3 GFLOPs + n: [0.33, 0.25, 1024] # YOLOv8n-ghost summary: 403 layers, 1865316 parameters, 1865300 gradients, 5.8 GFLOPs + s: [0.33, 0.50, 1024] # YOLOv8s-ghost summary: 403 layers, 5960072 parameters, 5960056 gradients, 16.4 GFLOPs + m: [0.67, 0.75, 768] # YOLOv8m-ghost summary: 603 layers, 10336312 parameters, 10336296 gradients, 32.7 GFLOPs + l: [1.00, 1.00, 512] # YOLOv8l-ghost summary: 803 layers, 14277872 parameters, 14277856 gradients, 53.7 GFLOPs + x: [1.00, 1.25, 512] # YOLOv8x-ghost summary: 803 layers, 22229308 parameters, 22229292 gradients, 83.3 GFLOPs # YOLOv8.0n-ghost backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, GhostConv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, GhostConv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C3Ghost, [128, True]] - - [-1, 1, GhostConv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, GhostConv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C3Ghost, [256, True]] - - [-1, 1, GhostConv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, GhostConv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C3Ghost, [512, True]] - - [-1, 1, GhostConv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, GhostConv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C3Ghost, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv8.0n head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C3Ghost, [512]] # 12 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C3Ghost, [512]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C3Ghost, [256]] # 15 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C3Ghost, [256]] # 15 (P3/8-small) - [-1, 1, GhostConv, [256, 3, 2]] - - [[-1, 12], 1, Concat, [1]] # cat head P4 - - [-1, 3, C3Ghost, [512]] # 18 (P4/16-medium) + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, 
C3Ghost, [512]] # 18 (P4/16-medium) - [-1, 1, GhostConv, [512, 3, 2]] - - [[-1, 9], 1, Concat, [1]] # cat head P5 - - [-1, 3, C3Ghost, [1024]] # 21 (P5/32-large) + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C3Ghost, [1024]] # 21 (P5/32-large) - - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) + - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v8/yolov8-obb.yaml b/ultralytics/cfg/models/v8/yolov8-obb.yaml index 049b92732..7a7f60cae 100644 --- a/ultralytics/cfg/models/v8/yolov8-obb.yaml +++ b/ultralytics/cfg/models/v8/yolov8-obb.yaml @@ -2,45 +2,45 @@ # YOLOv8 Oriented Bounding Boxes (OBB) model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' # [depth, width, max_channels] - n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs - s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs - m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs - l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs - x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs + n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs + s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs + m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs + l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs + x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs # YOLOv8.0n backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv8.0n head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C2f, [512]] # 12 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C2f, [256]] # 15 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 12], 1, Concat, [1]] # cat head P4 - - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 9], 1, Concat, [1]] # cat head P5 - - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + - [[-1, 9], 
1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) - - [[15, 18, 21], 1, OBB, [nc, 1]] # OBB(P3, P4, P5) + - [[15, 18, 21], 1, OBB, [nc, 1]] # OBB(P3, P4, P5) diff --git a/ultralytics/cfg/models/v8/yolov8-p2.yaml b/ultralytics/cfg/models/v8/yolov8-p2.yaml index 3e286aa96..5392774bb 100644 --- a/ultralytics/cfg/models/v8/yolov8-p2.yaml +++ b/ultralytics/cfg/models/v8/yolov8-p2.yaml @@ -2,7 +2,7 @@ # YOLOv8 object detection model with P2-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -14,41 +14,41 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call # YOLOv8.0 backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv8.0-p2 head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C2f, [512]] # 12 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C2f, [256]] # 15 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 2], 1, Concat, [1]] # cat backbone P2 - - [-1, 3, C2f, [128]] # 18 (P2/4-xsmall) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 2], 1, Concat, [1]] # cat backbone P2 + - [-1, 3, C2f, [128]] # 18 (P2/4-xsmall) - [-1, 1, Conv, [128, 3, 2]] - - [[-1, 15], 1, Concat, [1]] # cat head P3 - - [-1, 3, C2f, [256]] # 21 (P3/8-small) + - [[-1, 15], 1, Concat, [1]] # cat head P3 + - [-1, 3, C2f, [256]] # 21 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 12], 1, Concat, [1]] # cat head P4 - - [-1, 3, C2f, [512]] # 24 (P4/16-medium) + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 24 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 9], 1, Concat, [1]] # cat head P5 - - [-1, 3, C2f, [1024]] # 27 (P5/32-large) + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 27 (P5/32-large) - - [[18, 21, 24, 27], 1, Detect, [nc]] # Detect(P2, P3, P4, P5) + - [[18, 21, 24, 27], 1, Detect, [nc]] # Detect(P2, P3, P4, P5) diff --git a/ultralytics/cfg/models/v8/yolov8-p6.yaml b/ultralytics/cfg/models/v8/yolov8-p6.yaml index 3635ed97e..2d6d5f978 100644 --- a/ultralytics/cfg/models/v8/yolov8-p6.yaml +++ b/ultralytics/cfg/models/v8/yolov8-p6.yaml @@ -2,7 +2,7 @@ # YOLOv8 object detection model with P3-P6 outputs. 
For Usage examples see https://docs.ultralytics.com/tasks/detect # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -14,43 +14,43 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will ca # YOLOv8.0x6 backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [768, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 + - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 - [-1, 3, C2f, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 11 + - [-1, 1, SPPF, [1024, 5]] # 11 # YOLOv8.0x6 head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 8], 1, Concat, [1]] # cat backbone P5 - - [-1, 3, C2, [768, False]] # 14 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P5 + - [-1, 3, C2, [768, False]] # 14 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C2, [512, False]] # 17 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2, [512, False]] # 17 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C2, [256, False]] # 20 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2, [256, False]] # 20 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 17], 1, Concat, [1]] # cat head P4 - - [-1, 3, C2, [512, False]] # 23 (P4/16-medium) + - [[-1, 17], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2, [512, False]] # 23 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 14], 1, Concat, [1]] # cat head P5 - - [-1, 3, C2, [768, False]] # 26 (P5/32-large) + - [[-1, 14], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2, [768, False]] # 26 (P5/32-large) - [-1, 1, Conv, [768, 3, 2]] - - [[-1, 11], 1, Concat, [1]] # cat head P6 - - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge) + - [[-1, 11], 1, Concat, [1]] # cat head P6 + - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge) - - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6) + - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6) diff --git a/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml b/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml index abf0cfcf9..60007ace1 100644 --- a/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +++ b/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml @@ -2,8 +2,8 @@ # YOLOv8-pose-p6 keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose # Parameters -nc: 1 # number of classes -kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +nc: 1 # number of classes +kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) scales: # model compound scaling constants, i.e. 
'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -15,43 +15,43 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will ca # YOLOv8.0x6 backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [768, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 + - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 - [-1, 3, C2f, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 11 + - [-1, 1, SPPF, [1024, 5]] # 11 # YOLOv8.0x6 head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 8], 1, Concat, [1]] # cat backbone P5 - - [-1, 3, C2, [768, False]] # 14 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P5 + - [-1, 3, C2, [768, False]] # 14 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C2, [512, False]] # 17 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2, [512, False]] # 17 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C2, [256, False]] # 20 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2, [256, False]] # 20 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 17], 1, Concat, [1]] # cat head P4 - - [-1, 3, C2, [512, False]] # 23 (P4/16-medium) + - [[-1, 17], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2, [512, False]] # 23 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 14], 1, Concat, [1]] # cat head P5 - - [-1, 3, C2, [768, False]] # 26 (P5/32-large) + - [[-1, 14], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2, [768, False]] # 26 (P5/32-large) - [-1, 1, Conv, [768, 3, 2]] - - [[-1, 11], 1, Concat, [1]] # cat head P6 - - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge) + - [[-1, 11], 1, Concat, [1]] # cat head P6 + - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge) - - [[20, 23, 26, 29], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5, P6) + - [[20, 23, 26, 29], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5, P6) diff --git a/ultralytics/cfg/models/v8/yolov8-pose.yaml b/ultralytics/cfg/models/v8/yolov8-pose.yaml index 9f48e1ead..60388ef59 100644 --- a/ultralytics/cfg/models/v8/yolov8-pose.yaml +++ b/ultralytics/cfg/models/v8/yolov8-pose.yaml @@ -2,8 +2,8 @@ # YOLOv8-pose keypoints/pose estimation model. For Usage examples see https://docs.ultralytics.com/tasks/pose # Parameters -nc: 1 # number of classes -kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) +nc: 1 # number of classes +kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible) scales: # model compound scaling constants, i.e. 'model=yolov8n-pose.yaml' will call yolov8-pose.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -15,33 +15,33 @@ scales: # model compound scaling constants, i.e. 
'model=yolov8n-pose.yaml' will # YOLOv8.0n backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv8.0n head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C2f, [512]] # 12 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C2f, [256]] # 15 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 12], 1, Concat, [1]] # cat head P4 - - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 9], 1, Concat, [1]] # cat head P5 - - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) - - [[15, 18, 21], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5) + - [[15, 18, 21], 1, Pose, [nc, kpt_shape]] # Pose(P3, P4, P5) diff --git a/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml b/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml index a0581068f..27b790b10 100644 --- a/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +++ b/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml @@ -2,45 +2,45 @@ # YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 
'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' # [depth, width, max_channels] - n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs - s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs - m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs - l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs - x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs + n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs + s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs + m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs + l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs + x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs # YOLOv8.0n backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv8.0n head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C2f, [512]] # 12 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C2f, [256]] # 15 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 12], 1, Concat, [1]] # cat head P4 - - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 9], 1, Concat, [1]] # cat head P5 - - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) - - [[15, 18, 21], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) + - [[15, 18, 21], 1, RTDETRDecoder, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml b/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml index 5ac093620..78c0444c8 100644 --- a/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +++ b/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml @@ -2,7 +2,7 @@ # YOLOv8-seg-p6 instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 
'model=yolov8n-seg-p6.yaml' will call yolov8-seg-p6.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -14,43 +14,43 @@ scales: # model compound scaling constants, i.e. 'model=yolov8n-seg-p6.yaml' wil # YOLOv8.0x6 backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [768, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 + - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 - [-1, 3, C2f, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 11 + - [-1, 1, SPPF, [1024, 5]] # 11 # YOLOv8.0x6 head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 8], 1, Concat, [1]] # cat backbone P5 - - [-1, 3, C2, [768, False]] # 14 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 8], 1, Concat, [1]] # cat backbone P5 + - [-1, 3, C2, [768, False]] # 14 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C2, [512, False]] # 17 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2, [512, False]] # 17 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C2, [256, False]] # 20 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2, [256, False]] # 20 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 17], 1, Concat, [1]] # cat head P4 - - [-1, 3, C2, [512, False]] # 23 (P4/16-medium) + - [[-1, 17], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2, [512, False]] # 23 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 14], 1, Concat, [1]] # cat head P5 - - [-1, 3, C2, [768, False]] # 26 (P5/32-large) + - [[-1, 14], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2, [768, False]] # 26 (P5/32-large) - [-1, 1, Conv, [768, 3, 2]] - - [[-1, 11], 1, Concat, [1]] # cat head P6 - - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge) + - [[-1, 11], 1, Concat, [1]] # cat head P6 + - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge) - - [[20, 23, 26, 29], 1, Segment, [nc, 32, 256]] # Pose(P3, P4, P5, P6) + - [[20, 23, 26, 29], 1, Segment, [nc, 32, 256]] # Pose(P3, P4, P5, P6) diff --git a/ultralytics/cfg/models/v8/yolov8-seg.yaml b/ultralytics/cfg/models/v8/yolov8-seg.yaml index fbb08fc45..700b7951d 100644 --- a/ultralytics/cfg/models/v8/yolov8-seg.yaml +++ b/ultralytics/cfg/models/v8/yolov8-seg.yaml @@ -2,7 +2,7 @@ # YOLOv8-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 'model=yolov8n-seg.yaml' will call yolov8-seg.yaml with scale 'n' # [depth, width, max_channels] n: [0.33, 0.25, 1024] @@ -14,33 +14,33 @@ scales: # model compound scaling constants, i.e. 
'model=yolov8n-seg.yaml' will c # YOLOv8.0n backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv8.0n head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C2f, [512]] # 12 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C2f, [256]] # 15 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 12], 1, Concat, [1]] # cat head P4 - - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 9], 1, Concat, [1]] # cat head P5 - - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) - - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5) + - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5) diff --git a/ultralytics/cfg/models/v8/yolov8.yaml b/ultralytics/cfg/models/v8/yolov8.yaml index 2255450f1..b328e98a1 100644 --- a/ultralytics/cfg/models/v8/yolov8.yaml +++ b/ultralytics/cfg/models/v8/yolov8.yaml @@ -2,45 +2,45 @@ # YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect # Parameters -nc: 80 # number of classes +nc: 80 # number of classes scales: # model compound scaling constants, i.e. 
'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' # [depth, width, max_channels] - n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs - s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs - m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs - l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs - x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs + n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs + s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs + m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs + l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs + x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs # YOLOv8.0n backbone backbone: # [from, repeats, module, args] - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - [-1, 3, C2f, [128, True]] - - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 - [-1, 6, C2f, [256, True]] - - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 - [-1, 6, C2f, [512, True]] - - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 - [-1, 3, C2f, [1024, True]] - - [-1, 1, SPPF, [1024, 5]] # 9 + - [-1, 1, SPPF, [1024, 5]] # 9 # YOLOv8.0n head head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 3, C2f, [512]] # 12 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 3, C2f, [256]] # 15 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]] - - [[-1, 12], 1, Concat, [1]] # cat head P4 - - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]] - - [[-1, 9], 1, Concat, [1]] # cat head P5 - - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) - - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) + - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/ultralytics/cfg/trackers/botsort.yaml b/ultralytics/cfg/trackers/botsort.yaml index cbbf348c2..0c66dc6cc 100644 --- a/ultralytics/cfg/trackers/botsort.yaml +++ b/ultralytics/cfg/trackers/botsort.yaml @@ -1,17 +1,17 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Default YOLO tracker settings for BoT-SORT tracker https://github.com/NirAharon/BoT-SORT -tracker_type: botsort # tracker type, ['botsort', 'bytetrack'] -track_high_thresh: 0.5 # threshold for the first association -track_low_thresh: 0.1 # threshold for the second association -new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks -track_buffer: 30 # buffer to 
calculate the time when to remove tracks -match_thresh: 0.8 # threshold for matching tracks +tracker_type: botsort # tracker type, ['botsort', 'bytetrack'] +track_high_thresh: 0.5 # threshold for the first association +track_low_thresh: 0.1 # threshold for the second association +new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks +track_buffer: 30 # buffer to calculate the time when to remove tracks +match_thresh: 0.8 # threshold for matching tracks # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now) # mot20: False # for tracker evaluation(not used for now) # BoT-SORT settings -gmc_method: sparseOptFlow # method of global motion compensation +gmc_method: sparseOptFlow # method of global motion compensation # ReID model related thresh (not supported yet) proximity_thresh: 0.5 appearance_thresh: 0.25 diff --git a/ultralytics/cfg/trackers/bytetrack.yaml b/ultralytics/cfg/trackers/bytetrack.yaml index 5060f9262..29d352c6a 100644 --- a/ultralytics/cfg/trackers/bytetrack.yaml +++ b/ultralytics/cfg/trackers/bytetrack.yaml @@ -1,11 +1,11 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license # Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack -tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack'] -track_high_thresh: 0.5 # threshold for the first association -track_low_thresh: 0.1 # threshold for the second association -new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks -track_buffer: 30 # buffer to calculate the time when to remove tracks -match_thresh: 0.8 # threshold for matching tracks +tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack'] +track_high_thresh: 0.5 # threshold for the first association +track_low_thresh: 0.1 # threshold for the second association +new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks +track_buffer: 30 # buffer to calculate the time when to remove tracks +match_thresh: 0.8 # threshold for matching tracks # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now) # mot20: False # for tracker evaluation(not used for now)
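
The BoT-SORT and ByteTrack settings above are selected at runtime by passing the corresponding YAML name to the Ultralytics tracking API. A minimal Python sketch, assuming placeholder inputs "yolov8n.pt" weights and a local "video.mp4" source (neither is part of this patch):

from ultralytics import YOLO

# Load any detection model; the tracker config is chosen independently of the model YAML.
model = YOLO("yolov8n.pt")

# tracker= picks one of the configs above ("bytetrack.yaml" or "botsort.yaml");
# persist=True keeps track IDs across successive calls on the same stream.
results = model.track(source="video.mp4", tracker="bytetrack.yaml", persist=True)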