Merge branch 'main' into benchmark-format-args

Ultralytics Assistant committed 6 months ago (via GitHub)
commit 69e9ab6864
36 changed files (lines changed shown in parentheses):

  1. .github/workflows/ci.yaml (2)
  2. .github/workflows/docs.yml (2)
  3. .github/workflows/publish.yml (83)
  4. docs/build_docs.py (2)
  5. docs/en/datasets/detect/index.md (1)
  6. docs/en/datasets/index.md (3)
  7. docs/en/datasets/segment/index.md (1)
  8. docs/mkdocs_github_authors.yaml (166)
  9. examples/heatmaps.ipynb (2)
  10. examples/object_counting.ipynb (2)
  11. pyproject.toml (3)
  12. tests/test_explorer.py (5)
  13. tests/test_python.py (2)
  14. ultralytics/__init__.py (2)
  15. ultralytics/data/converter.py (2)
  16. ultralytics/data/dataset.py (2)
  17. ultralytics/data/split_dota.py (2)
  18. ultralytics/engine/exporter.py (54)
  19. ultralytics/engine/predictor.py (2)
  20. ultralytics/engine/validator.py (5)
  21. ultralytics/hub/utils.py (17)
  22. ultralytics/models/fastsam/predict.py (2)
  23. ultralytics/models/sam/model.py (2)
  24. ultralytics/models/sam/modules/blocks.py (2)
  25. ultralytics/models/sam/modules/encoders.py (7)
  26. ultralytics/models/sam/modules/sam.py (24)
  27. ultralytics/models/sam/modules/tiny_encoder.py (23)
  28. ultralytics/models/sam/predict.py (19)
  29. ultralytics/nn/modules/block.py (4)
  30. ultralytics/nn/modules/head.py (4)
  31. ultralytics/solutions/parking_management.py (2)
  32. ultralytics/utils/benchmarks.py (6)
  33. ultralytics/utils/checks.py (11)
  34. ultralytics/utils/metrics.py (2)
  35. ultralytics/utils/ops.py (2)
  36. ultralytics/utils/torch_utils.py (7)

@ -156,7 +156,7 @@ jobs:
strategy:
fail-fast: false
matrix:
-os: [ubuntu-latest, macos-14]
+os: [ubuntu-latest, macos-14, windows-latest]
python-version: ["3.11"]
torch: [latest]
include:

@ -46,7 +46,7 @@ jobs:
run: pip install ruff black tqdm mkdocs-material "mkdocstrings[python]" mkdocs-jupyter mkdocs-redirects mkdocs-ultralytics-plugin mkdocs-macros-plugin
- name: Ruff fixes
continue-on-error: true
-run: ruff check --fix --fix-unsafe --select D --ignore=D100,D104,D203,D205,D212,D213,D401,D406,D407,D413 .
+run: ruff check --fix --unsafe-fixes --select D --ignore=D100,D104,D203,D205,D212,D213,D401,D406,D407,D413 .
- name: Update Docs Reference Section and Push Changes
if: github.event_name == 'pull_request_target'
run: |

@ -34,7 +34,7 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip wheel
-pip install openai requests build twine toml
+pip install requests build twine toml
- name: Check PyPI version
shell: python
run: |
@ -79,8 +79,8 @@ jobs:
publish = True # First release
os.system(f'echo "increment={publish}" >> $GITHUB_OUTPUT')
os.system(f'echo "version={local_version}" >> $GITHUB_OUTPUT')
os.system(f'echo "previous_version={online_version or "N/A"}" >> $GITHUB_OUTPUT')
os.system(f'echo "current_tag=v{local_version}" >> $GITHUB_OUTPUT')
os.system(f'echo "previous_tag=v{online_version}" >> $GITHUB_OUTPUT')
if publish:
print('Ready to publish new version to PyPI ✅.')
@ -88,81 +88,18 @@ jobs:
- name: Publish new tag
if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
run: |
git tag -a "v${{ steps.check_pypi.outputs.version }}" -m "$(git log -1 --pretty=%B)" # i.e. "v0.1.2 commit message"
git push origin "v${{ steps.check_pypi.outputs.version }}"
git tag -a "${{ steps.check_pypi.outputs.current_tag }}" -m "$(git log -1 --pretty=%B)" # i.e. "v0.1.2 commit message"
git push origin "${{ steps.check_pypi.outputs.current_tag }}"
- name: Publish new release
if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
env:
-OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }}
-CURRENT_TAG: ${{ steps.check_pypi.outputs.version }}
-PREVIOUS_TAG: ${{ steps.check_pypi.outputs.previous_version }}
-shell: python
+CURRENT_TAG: ${{ steps.check_pypi.outputs.current_tag }}
+PREVIOUS_TAG: ${{ steps.check_pypi.outputs.previous_tag }}
run: |
-import openai
-import os
-import requests
-import json
-import subprocess
-# Retrieve environment variables
-OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
-GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
-CURRENT_TAG = os.getenv('CURRENT_TAG')
-PREVIOUS_TAG = os.getenv('PREVIOUS_TAG')
-# Check for required environment variables
-if not all([OPENAI_API_KEY, GITHUB_TOKEN, CURRENT_TAG, PREVIOUS_TAG]):
-raise ValueError("One or more required environment variables are missing.")
-latest_tag = f"v{CURRENT_TAG}"
-previous_tag = f"v{PREVIOUS_TAG}"
-repo = os.getenv('GITHUB_REPOSITORY')
-headers = {"Authorization": f"token {GITHUB_TOKEN}", "Accept": "application/vnd.github.v3.diff"}
-# Get the diff between the tags
-url = f"https://api.github.com/repos/{repo}/compare/{previous_tag}...{latest_tag}"
-response = requests.get(url, headers=headers)
-diff = response.text if response.status_code == 200 else f"Failed to get diff: {response.content}"
-# Get summary
-messages = [
-{
-"role": "system",
-"content": "You are an Ultralytics AI assistant skilled in software development and technical communication. Your task is to summarize GitHub releases in a way that is detailed, accurate, and understandable to both expert developers and non-expert users. Focus on highlighting the key changes and their impact in simple and intuitive terms."
-},
-{
-"role": "user",
-"content": f"Summarize the updates made in the '{latest_tag}' tag, focusing on major changes, their purpose, and potential impact. Keep the summary clear and suitable for a broad audience. Add emojis to enliven the summary. Reply directly with a summary along these example guidelines, though feel free to adjust as appropriate:\n\n"
-f"## 🌟 Summary (single-line synopsis)\n"
-f"## 📊 Key Changes (bullet points highlighting any major changes)\n"
-f"## 🎯 Purpose & Impact (bullet points explaining any benefits and potential impact to users)\n"
-f"\n\nHere's the release diff:\n\n{diff[:300000]}",
-}
-]
-client = openai.OpenAI(api_key=OPENAI_API_KEY)
-completion = client.chat.completions.create(model="gpt-4o-2024-08-06", messages=messages)
-summary = completion.choices[0].message.content.strip()
-# Get the latest commit message
-commit_message = subprocess.run(['git', 'log', '-1', '--pretty=%B'], check=True, text=True, capture_output=True).stdout.split("\n")[0].strip()
-# Prepare release data
-release = {
-'tag_name': latest_tag,
-'name': f"{latest_tag} - {commit_message}",
-'body': summary,
-'draft': False,
-'prerelease': False
-}
-# Create the release on GitHub
-release_url = f"https://api.github.com/repos/{repo}/releases"
-release_response = requests.post(release_url, headers=headers, data=json.dumps(release))
-if release_response.status_code == 201:
-print(f'Successfully created release {latest_tag}')
-else:
-print(f'Failed to create release {latest_tag}: {release_response.content}')
+curl -s "https://raw.githubusercontent.com/ultralytics/actions/main/utils/summarize_release.py" | python -
+shell: bash
- name: Publish to PyPI
continue-on-error: true
if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True'
@ -193,7 +130,7 @@ jobs:
uses: slackapi/slack-github-action@v1.26.0
with:
payload: |
{"text": "<!channel> GitHub Actions success for ${{ github.workflow }} ✅\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* NEW '${{ github.repository }} v${{ steps.check_pypi.outputs.version }}' pip package published 😃\n*Job Status:* ${{ job.status }}\n*Pull Request:* <https://github.com/${{ github.repository }}/pull/${{ env.PR_NUMBER }}> ${{ env.PR_TITLE }}\n"}
{"text": "<!channel> GitHub Actions success for ${{ github.workflow }} ✅\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* NEW '${{ github.repository }} ${{ steps.check_pypi.outputs.current_tag }}' pip package published 😃\n*Job Status:* ${{ job.status }}\n*Pull Request:* <https://github.com/${{ github.repository }}/pull/${{ env.PR_NUMBER }}> ${{ env.PR_TITLE }}\n"}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }}
- name: Notify on Slack (Failure)

@ -164,7 +164,7 @@ def update_docs_html():
# Convert plaintext links to HTML hyperlinks
files_modified = 0
for html_file in tqdm(SITE.rglob("*.html"), desc="Converting plaintext links"):
with open(html_file, "r", encoding="utf-8") as file:
with open(html_file, encoding="utf-8") as file:
content = file.read()
updated_content = convert_plaintext_links_to_html(content)
if updated_content != content:

@ -77,6 +77,7 @@ Here is a list of the supported datasets and a brief description for each:
- [COCO](coco.md): Common Objects in Context (COCO) is a large-scale object detection, segmentation, and captioning dataset with 80 object categories.
- [LVIS](lvis.md): A large-scale object detection, segmentation, and captioning dataset with 1203 object categories.
- [COCO8](coco8.md): A smaller subset of the first 4 images from COCO train and COCO val, suitable for quick tests.
+- [COCO128](coco.md): A smaller subset of the first 128 images from COCO train and COCO val, suitable for tests.
- [Global Wheat 2020](globalwheat2020.md): A dataset containing images of wheat heads for the Global Wheat Challenge 2020.
- [Objects365](objects365.md): A high-quality, large-scale dataset for object detection with 365 object categories and over 600K annotated images.
- [OpenImagesV7](open-images-v7.md): A comprehensive dataset by Google with 1.7M train images and 42k validation images.

@ -38,6 +38,7 @@ Bounding box object detection is a computer vision technique that involves detec
- [COCO](detect/coco.md): Common Objects in Context (COCO) is a large-scale object detection, segmentation, and captioning dataset with 80 object categories.
- [LVIS](detect/lvis.md): A large-scale object detection, segmentation, and captioning dataset with 1203 object categories.
- [COCO8](detect/coco8.md): A smaller subset of the first 4 images from COCO train and COCO val, suitable for quick tests.
+- [COCO128](detect/coco.md): A smaller subset of the first 128 images from COCO train and COCO val, suitable for tests.
- [Global Wheat 2020](detect/globalwheat2020.md): A dataset containing images of wheat heads for the Global Wheat Challenge 2020.
- [Objects365](detect/objects365.md): A high-quality, large-scale dataset for object detection with 365 object categories and over 600K annotated images.
- [OpenImagesV7](detect/open-images-v7.md): A comprehensive dataset by Google with 1.7M train images and 42k validation images.
@ -56,6 +57,7 @@ Instance segmentation is a computer vision technique that involves identifying a
- [COCO](segment/coco.md): A large-scale dataset designed for object detection, segmentation, and captioning tasks with over 200K labeled images.
- [COCO8-seg](segment/coco8-seg.md): A smaller dataset for instance segmentation tasks, containing a subset of 8 COCO images with segmentation annotations.
+- [COCO128-seg](segment/coco.md): A smaller dataset for instance segmentation tasks, containing a subset of 128 COCO images with segmentation annotations.
- [Crack-seg](segment/crack-seg.md): Specifically crafted dataset for detecting cracks on roads and walls, applicable for both object detection and segmentation tasks.
- [Package-seg](segment/package-seg.md): Tailored dataset for identifying packages in warehouses or industrial settings, suitable for both object detection and segmentation applications.
- [Carparts-seg](segment/carparts-seg.md): Purpose-built dataset for identifying vehicle parts, catering to design, manufacturing, and research needs. It serves for both object detection and segmentation tasks.
@ -88,6 +90,7 @@ Image classification is a computer vision task that involves categorizing an ima
Oriented Bounding Boxes (OBB) is a method in computer vision for detecting angled objects in images using rotated bounding boxes, often applied to aerial and satellite imagery.
- [DOTA-v2](obb/dota-v2.md): A popular OBB aerial imagery dataset with 1.7 million instances and 11,268 images.
+- [DOTA8](obb/dota8.md): A smaller subset of the first 8 images from the DOTAv1 split set, 4 for training and 4 for validation, suitable for quick tests.
## [Multi-Object Tracking](track/index.md)

@ -93,6 +93,7 @@ The `train` and `val` fields specify the paths to the directories containing the
- [COCO](coco.md): A comprehensive dataset for object detection, segmentation, and captioning, featuring over 200K labeled images across a wide range of categories.
- [COCO8-seg](coco8-seg.md): A compact, 8-image subset of COCO designed for quick testing of segmentation model training, ideal for CI checks and workflow validation in the `ultralytics` repository.
+- [COCO128-seg](coco.md): A smaller dataset for instance segmentation tasks, containing a subset of 128 COCO images with segmentation annotations.
- [Carparts-seg](carparts-seg.md): A specialized dataset focused on the segmentation of car parts, ideal for automotive applications. It includes a variety of vehicles with detailed annotations of individual car components.
- [Crack-seg](crack-seg.md): A dataset tailored for the segmentation of cracks in various surfaces. Essential for infrastructure maintenance and quality control, it provides detailed imagery for training models to identify structural weaknesses.
- [Package-seg](package-seg.md): A dataset dedicated to the segmentation of different types of packaging materials and shapes. It's particularly useful for logistics and warehouse automation, aiding in the development of systems for package handling and sorting.

@ -1,46 +1,120 @@
-116908874+jk4e@users.noreply.github.com: jk4e
-1185102784@qq.com: Laughing-q
-130829914+IvorZhu331@users.noreply.github.com: IvorZhu331
-135830346+UltralyticsAssistant@users.noreply.github.com: UltralyticsAssistant
-1579093407@qq.com: YOLOv5-Magic
-17216799+ouphi@users.noreply.github.com: ouphi
-17316848+maianumerosky@users.noreply.github.com: maianumerosky
-34196005+fcakyon@users.noreply.github.com: fcakyon
-37276661+capjamesg@users.noreply.github.com: capjamesg
-39910262+ChaoningZhang@users.noreply.github.com: ChaoningZhang
-40165666+berry-ding@users.noreply.github.com: berry-ding
-47978446+sergiuwaxmann@users.noreply.github.com: sergiuwaxmann
-48149018+zhixuwei@users.noreply.github.com: zhixuwei
-49699333+dependabot[bot]@users.noreply.github.com: dependabot
-52826299+Chayanonjackal@users.noreply.github.com: Chayanonjackal
-53246858+hasanghaffari93@users.noreply.github.com: hasanghaffari93
-60036186+mfloto@users.noreply.github.com: mfloto
-61612323+Laughing-q@users.noreply.github.com: Laughing-q
-62214284+Burhan-Q@users.noreply.github.com: Burhan-Q
-68285002+Kayzwer@users.noreply.github.com: Kayzwer
-75611662+tensorturtle@users.noreply.github.com: tensorturtle
-78843978+Skillnoob@users.noreply.github.com: Skillnoob
-79740115+0xSynapse@users.noreply.github.com: 0xSynapse
-Francesco.mttl@gmail.com: ambitious-octopus
-abirami.vina@gmail.com: abirami-vina
-ahmelsamahy@gmail.com: Ahelsamahy
-andrei.kochin@intel.com: andrei-kochin
-ayush.chaurarsia@gmail.com: AyushExel
-chr043416@gmail.com: RizwanMunawar
-glenn.jocher@ultralytics.com: glenn-jocher
-hnliu_2@stu.xidian.edu.cn: null
-jpedrofonseca_94@hotmail.com: null
-k-2feng@hotmail.com: null
-lakshantha@ultralytics.com: lakshanthad
-lakshanthad@yahoo.com: lakshanthad
-muhammadrizwanmunawar123@gmail.com: RizwanMunawar
-not.committed.yet: null
-plashchynski@gmail.com: plashchynski
-priytosh.revolution@live.com: priytosh-tripathi
-rulosanti@gmail.com: null
-shuizhuyuanluo@126.com: null
-sometimesocrazy@gmail.com: null
-stormsson@users.noreply.github.com: stormsson
-waxmann.sergiu@me.com: sergiuwaxmann
-web@ultralytics.com: UltralyticsAssistant
-xinwang614@gmail.com: GreatV
+116908874+jk4e@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/116908874?v=4
+  username: jk4e
+130829914+IvorZhu331@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/130829914?v=4
+  username: IvorZhu331
+135830346+UltralyticsAssistant@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/135830346?v=4
+  username: UltralyticsAssistant
+1579093407@qq.com:
+  avatar: https://avatars.githubusercontent.com/u/160490334?v=4
+  username: YOLOv5-Magic
+17216799+ouphi@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/17216799?v=4
+  username: ouphi
+17316848+maianumerosky@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/17316848?v=4
+  username: maianumerosky
+34196005+fcakyon@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/34196005?v=4
+  username: fcakyon
+37276661+capjamesg@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/37276661?v=4
+  username: capjamesg
+39910262+ChaoningZhang@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/39910262?v=4
+  username: ChaoningZhang
+40165666+berry-ding@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/40165666?v=4
+  username: berry-ding
+47978446+sergiuwaxmann@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/47978446?v=4
+  username: sergiuwaxmann
+48149018+zhixuwei@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/48149018?v=4
+  username: zhixuwei
+49699333+dependabot[bot]@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/27347476?v=4
+  username: dependabot[bot]
+53246858+hasanghaffari93@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/53246858?v=4
+  username: hasanghaffari93
+60036186+mfloto@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/60036186?v=4
+  username: mfloto
+61612323+Laughing-q@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/61612323?v=4
+  username: Laughing-q
+62214284+Burhan-Q@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/62214284?v=4
+  username: Burhan-Q
+68285002+Kayzwer@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/68285002?v=4
+  username: Kayzwer
+75611662+tensorturtle@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/75611662?v=4
+  username: tensorturtle
+78843978+Skillnoob@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/78843978?v=4
+  username: Skillnoob
+79740115+0xSynapse@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/79740115?v=4
+  username: 0xSynapse
+Francesco.mttl@gmail.com:
+  avatar: https://avatars.githubusercontent.com/u/3855193?v=4
+  username: ambitious-octopus
+abirami.vina@gmail.com:
+  avatar: https://avatars.githubusercontent.com/u/25847604?v=4
+  username: abirami-vina
+ahmelsamahy@gmail.com:
+  avatar: https://avatars.githubusercontent.com/u/10195309?v=4
+  username: Ahelsamahy
+andrei.kochin@intel.com:
+  avatar: https://avatars.githubusercontent.com/u/72827868?v=4
+  username: andrei-kochin
+ayush.chaurarsia@gmail.com:
+  avatar: https://avatars.githubusercontent.com/u/15766192?v=4
+  username: AyushExel
+chr043416@gmail.com:
+  avatar: https://avatars.githubusercontent.com/u/62513924?v=4
+  username: RizwanMunawar
+glenn.jocher@ultralytics.com:
+  avatar: https://avatars.githubusercontent.com/u/26833433?v=4
+  username: glenn-jocher
+hnliu_2@stu.xidian.edu.cn:
+  avatar: null
+  username: null
+jpedrofonseca_94@hotmail.com:
+  avatar: null
+  username: null
+k-2feng@hotmail.com:
+  avatar: null
+  username: null
+lakshanthad@yahoo.com:
+  avatar: https://avatars.githubusercontent.com/u/20147381?v=4
+  username: lakshanthad
+muhammadrizwanmunawar123@gmail.com:
+  avatar: https://avatars.githubusercontent.com/u/62513924?v=4
+  username: RizwanMunawar
+plashchynski@gmail.com:
+  avatar: https://avatars.githubusercontent.com/u/30833?v=4
+  username: plashchynski
+priytosh.revolution@live.com:
+  avatar: https://avatars.githubusercontent.com/u/19519529?v=4
+  username: priytosh-tripathi
+rulosanti@gmail.com:
+  avatar: null
+  username: null
+shuizhuyuanluo@126.com:
+  avatar: null
+  username: null
+sometimesocrazy@gmail.com:
+  avatar: null
+  username: null
+stormsson@users.noreply.github.com:
+  avatar: https://avatars.githubusercontent.com/u/1133032?v=4
+  username: stormsson
+xinwang614@gmail.com:
+  avatar: https://avatars.githubusercontent.com/u/17264618?v=4
+  username: GreatV

@ -116,7 +116,7 @@
" colormap=cv2.COLORMAP_PARULA,\n",
" view_img=True,\n",
" shape=\"circle\",\n",
" classes_names=model.names,\n",
" names=model.names,\n",
")\n",
"\n",
"while cap.isOpened():\n",

@ -129,7 +129,7 @@
"counter = solutions.ObjectCounter(\n",
" view_img=True, # Display the image during processing\n",
" reg_pts=line_points, # Region of interest points\n",
" classes_names=model.names, # Class names from the YOLO model\n",
" names=model.names, # Class names from the YOLO model\n",
" draw_tracks=True, # Draw tracking lines for objects\n",
" line_thickness=2, # Thickness of the lines drawn\n",
")\n",

@ -71,6 +71,7 @@ dependencies = [
"pyyaml>=5.3.1",
"requests>=2.23.0",
"scipy>=1.4.1",
"torch>=1.8.0,<2.4.0; sys_platform == 'win32'", # Windows CPU errors https://github.com/ultralytics/ultralytics/issues/15049
"torch>=1.8.0",
"torchvision>=0.9.0",
"tqdm>=4.64.0", # progress bars
@ -93,7 +94,7 @@ dev = [
"mkdocstrings[python]",
"mkdocs-jupyter", # notebooks
"mkdocs-redirects", # 301 redirects
"mkdocs-ultralytics-plugin>=0.1.2", # for meta descriptions and images, dates and authors
"mkdocs-ultralytics-plugin>=0.1.6", # for meta descriptions and images, dates and authors
"mkdocs-macros-plugin>=1.0.5" # duplicating content (i.e. export tables) in multiple places
]
export = [

@ -5,9 +5,11 @@ import pytest
from ultralytics import Explorer
from ultralytics.utils import ASSETS
+from ultralytics.utils.torch_utils import TORCH_1_13
@pytest.mark.slow
+@pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
def test_similarity():
"""Test the correctness and response length of similarity calculations and SQL queries in the Explorer."""
exp = Explorer(data="coco8.yaml")
@ -25,6 +27,7 @@ def test_similarity():
@pytest.mark.slow
+@pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
def test_det():
"""Test detection functionalities and verify embedding table includes bounding boxes."""
exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
@ -38,6 +41,7 @@ def test_det():
@pytest.mark.slow
+@pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
def test_seg():
"""Test segmentation functionalities and ensure the embedding table includes segmentation masks."""
exp = Explorer(data="coco8-seg.yaml", model="yolov8n-seg.pt")
@ -50,6 +54,7 @@ def test_seg():
@pytest.mark.slow
+@pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
def test_pose():
"""Test pose estimation functionality and verify the embedding table includes keypoints."""
exp = Explorer(data="coco8-pose.yaml", model="yolov8n-pose.pt")

@ -252,6 +252,8 @@ def test_labels_and_crops():
for r in results:
im_name = Path(r.path).stem
cls_idxs = r.boxes.cls.int().tolist()
+# Check correct detections
+assert cls_idxs == ([0, 0, 5, 0, 7] if r.path.endswith("bus.jpg") else [0, 0]) # bus.jpg and zidane.jpg classes
# Check label path
labels = save_path / f"labels/{im_name}.txt"
assert labels.exists()

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = "8.2.83"
__version__ = "8.2.86"
import os

@ -490,7 +490,7 @@ def convert_dota_to_yolo_obb(dota_root_path: str):
normalized_coords = [
coords[i] / image_width if i % 2 == 0 else coords[i] / image_height for i in range(8)
]
formatted_coords = ["{:.6g}".format(coord) for coord in normalized_coords]
formatted_coords = [f"{coord:.6g}" for coord in normalized_coords]
g.write(f"{class_idx} {' '.join(formatted_coords)}\n")
for phase in ["train", "val"]:

@ -296,7 +296,7 @@ class GroundingDataset(YOLODataset):
"""Loads annotations from a JSON file, filters, and normalizes bounding boxes for each image."""
labels = []
LOGGER.info("Loading annotation file...")
with open(self.json_file, "r") as f:
with open(self.json_file) as f:
annotations = json.load(f)
images = {f'{x["id"]:d}': x for x in annotations["images"]}
img_to_anns = defaultdict(list)

@ -193,7 +193,7 @@ def crop_and_save(anno, windows, window_objs, im_dir, lb_dir, allow_background_i
with open(Path(lb_dir) / f"{new_name}.txt", "w") as f:
for lb in label:
formatted_coords = ["{:.6g}".format(coord) for coord in lb[1:]]
formatted_coords = [f"{coord:.6g}" for coord in lb[1:]]
f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")

@ -138,7 +138,7 @@ def try_export(inner_func):
LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as '{f}' ({file_size(f):.1f} MB)")
return f, model
except Exception as e:
LOGGER.info(f"{prefix} export failure ❌ {dt.t:.1f}s: {e}")
LOGGER.error(f"{prefix} export failure ❌ {dt.t:.1f}s: {e}")
raise e
return outer_func
@ -204,8 +204,8 @@ class Exporter:
self.args.half = False
assert not self.args.dynamic, "half=True not compatible with dynamic=True, i.e. use only one."
self.imgsz = check_imgsz(self.args.imgsz, stride=model.stride, min_dim=2) # check image size
-if self.args.int8 and (engine or xml):
-self.args.dynamic = True # enforce dynamic to export TensorRT INT8; ensures ONNX is dynamic
+if self.args.int8 and engine:
+self.args.dynamic = True # enforce dynamic to export TensorRT INT8
if self.args.optimize:
assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
@ -248,6 +248,7 @@ class Exporter:
m.dynamic = self.args.dynamic
m.export = True
m.format = self.args.format
+m.max_det = self.args.max_det
elif isinstance(m, C2f) and not is_tf_format:
# EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
m.forward = m.forward_split
@ -353,18 +354,20 @@ class Exporter:
"""Build and return a dataloader suitable for calibration of INT8 models."""
LOGGER.info(f"{prefix} collecting INT8 calibration images from 'data={self.args.data}'")
data = (check_cls_dataset if self.model.task == "classify" else check_det_dataset)(self.args.data)
+# TensorRT INT8 calibration should use 2x batch size
+batch = self.args.batch * (2 if self.args.format == "engine" else 1)
dataset = YOLODataset(
data[self.args.split or "val"],
data=data,
task=self.model.task,
imgsz=self.imgsz[0],
augment=False,
-batch_size=self.args.batch * 2, # NOTE TensorRT INT8 calibration should use 2x batch size
+batch_size=batch,
)
n = len(dataset)
if n < 300:
LOGGER.warning(f"{prefix} WARNING ⚠ >300 images recommended for INT8 calibration, found {n} images.")
-return build_dataloader(dataset, batch=self.args.batch * 2, workers=0) # required for batch loading
+return build_dataloader(dataset, batch=batch, workers=0) # required for batch loading
@try_export
def export_torchscript(self, prefix=colorstr("TorchScript:")):
@ -420,7 +423,6 @@ class Exporter:
# Checks
model_onnx = onnx.load(f) # load onnx model
-# onnx.checker.check_model(model_onnx) # check onnx model
# Simplify
if self.args.simplify:
@ -430,10 +432,6 @@ class Exporter:
LOGGER.info(f"{prefix} slimming with onnxslim {onnxslim.__version__}...")
model_onnx = onnxslim.slim(model_onnx)
-# ONNX Simplifier (deprecated as must be compiled with 'cmake' in aarch64 and Conda CI environments)
-# import onnxsim
-# model_onnx, check = onnxsim.simplify(model_onnx)
-# assert check, "Simplified ONNX model could not be validated"
except Exception as e:
LOGGER.warning(f"{prefix} simplifier failure: {e}")
@ -677,7 +675,6 @@ class Exporter:
def export_engine(self, prefix=colorstr("TensorRT:")):
"""YOLOv8 TensorRT export https://developer.nvidia.com/tensorrt."""
assert self.im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. use 'device=0'"
-# self.args.simplify = True
f_onnx, _ = self.export_onnx() # run before TRT import https://github.com/ultralytics/ultralytics/issues/7016
try:
@ -784,7 +781,7 @@ class Exporter:
# Load dataset w/ builder (for batching) and calibrate
config.int8_calibrator = EngineCalibrator(
dataset=self.get_int8_calibration_dataloader(prefix),
-batch=2 * self.args.batch,
+batch=2 * self.args.batch, # TensorRT INT8 calibration should use 2x batch size
cache=str(self.file.with_suffix(".cache")),
)
@ -867,8 +864,6 @@ class Exporter:
f.mkdir()
images = [batch["img"].permute(0, 2, 3, 1) for batch in self.get_int8_calibration_dataloader(prefix)]
images = torch.cat(images, 0).float()
-# mean = images.view(-1, 3).mean(0) # imagenet mean [123.675, 116.28, 103.53]
-# std = images.view(-1, 3).std(0) # imagenet std [58.395, 57.12, 57.375]
np.save(str(tmp_file), images.numpy().astype(np.float32)) # BHWC
np_data = [["images", tmp_file, [[[[0, 0, 0]]]], [[[[255, 255, 255]]]]]]
else:
@ -996,20 +991,7 @@ class Exporter:
if " " in f:
LOGGER.warning(f"{prefix} WARNING ⚠ your model may not work correctly with spaces in path '{f}'.")
-# f_json = Path(f) / 'model.json' # *.json path
-# with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
-# subst = re.sub(
-# r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
-# r'"Identity.?.?": {"name": "Identity.?.?"}, '
-# r'"Identity.?.?": {"name": "Identity.?.?"}, '
-# r'"Identity.?.?": {"name": "Identity.?.?"}}}',
-# r'{"outputs": {"Identity": {"name": "Identity"}, '
-# r'"Identity_1": {"name": "Identity_1"}, '
-# r'"Identity_2": {"name": "Identity_2"}, '
-# r'"Identity_3": {"name": "Identity_3"}}}',
-# f_json.read_text(),
-# )
-# j.write(subst)
# Add metadata
yaml_save(Path(f) / "metadata.yaml", self.metadata) # add metadata.yaml
return f, None
@ -1102,27 +1084,11 @@ class Exporter:
names = self.metadata["names"]
nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
_, nc = out0_shape # number of anchors, number of classes
-# _, nc = out0.type.multiArrayType.shape
assert len(names) == nc, f"{len(names)} names found for nc={nc}" # check
# Define output shapes (missing)
out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80)
out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4)
-# spec.neuralNetwork.preprocessing[0].featureName = '0'
-# Flexible input shapes
-# from coremltools.models.neural_network import flexible_shape_utils
-# s = [] # shapes
-# s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192))
-# s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384)) # (height, width)
-# flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s)
-# r = flexible_shape_utils.NeuralNetworkImageSizeRange() # shape ranges
-# r.add_height_range((192, 640))
-# r.add_width_range((192, 640))
-# flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r)
-# Print
-# print(spec.description)
# Model from spec
model = ct.models.MLModel(spec, weights_dir=weights_dir)

@ -328,7 +328,7 @@ class BasePredictor:
frame = int(match[1]) if match else None # 0 if frame undetermined
self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}"))
string += "%gx%g " % im.shape[2:]
string += "{:g}x{:g} ".format(*im.shape[2:])
result = self.results[i]
result.save_dir = self.save_dir.__str__() # used in other locations
string += f"{result.verbose()}{result.speed['inference']:.1f}ms"

@ -202,8 +202,9 @@ class BaseValidator:
return {k: round(float(v), 5) for k, v in results.items()} # return results as 5 decimal place floats
else:
LOGGER.info(
"Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image"
% tuple(self.speed.values())
"Speed: {:.1f}ms preprocess, {:.1f}ms inference, {:.1f}ms loss, {:.1f}ms postprocess per image".format(
*tuple(self.speed.values())
)
)
if self.args.save_json and self.jdict:
with open(str(self.save_dir / "predictions.json"), "w") as f:

@ -55,23 +55,22 @@ def request_with_credentials(url: str) -> any:
display.display(
display.Javascript(
"""
window._hub_tmp = new Promise((resolve, reject) => {
f"""
window._hub_tmp = new Promise((resolve, reject) => {{
const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000)
fetch("%s", {
fetch("{url}", {{
method: 'POST',
credentials: 'include'
})
}})
.then((response) => resolve(response.json()))
.then((json) => {
.then((json) => {{
clearTimeout(timeout);
}).catch((err) => {
}}).catch((err) => {{
clearTimeout(timeout);
reject(err);
});
});
}});
}});
"""
% url
)
)
return output.eval_js("_hub_tmp")

@ -100,7 +100,7 @@ class FastSAMPredictor(SegmentationPredictor):
texts = [texts]
crop_ims, filter_idx = [], []
for i, b in enumerate(result.boxes.xyxy.tolist()):
-x1, y1, x2, y2 = [int(x) for x in b]
+x1, y1, x2, y2 = (int(x) for x in b)
if masks[i].sum() <= 100:
filter_idx.append(i)
continue

@ -106,7 +106,7 @@ class SAM(Model):
... print(f"Detected {len(r.masks)} masks")
"""
overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024)
-kwargs.update(overrides)
+kwargs = {**overrides, **kwargs}
prompts = dict(bboxes=bboxes, points=points, labels=labels)
return super().predict(source, stream, prompts=prompts, **kwargs)
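
The rewrite also flips precedence: kwargs.update(overrides) let the hard-coded defaults clobber caller arguments, while {**overrides, **kwargs} keeps the defaults only as fallbacks. A minimal sketch:

    overrides = dict(conf=0.25, imgsz=1024)
    kwargs = dict(conf=0.5)  # caller-supplied
    assert {**overrides, **kwargs} == {"conf": 0.5, "imgsz": 1024}  # caller's conf now wins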

@ -35,7 +35,7 @@ class DropPath(nn.Module):
def __init__(self, drop_prob=0.0, scale_by_keep=True):
"""Initialize DropPath module for stochastic depth regularization during training."""
-super(DropPath, self).__init__()
+super().__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep

@ -151,7 +151,12 @@ class ImageEncoderViT(nn.Module):
"""Processes input through patch embedding, positional embedding, transformer blocks, and neck module."""
x = self.patch_embed(x)
if self.pos_embed is not None:
-x = x + self.pos_embed
+pos_embed = (
+F.interpolate(self.pos_embed.permute(0, 3, 1, 2), scale_factor=self.img_size / 1024).permute(0, 2, 3, 1)
+if self.img_size != 1024
+else self.pos_embed
+)
+x = x + pos_embed
for blk in self.blocks:
x = blk(x)
return self.neck(x.permute(0, 3, 1, 2))
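
The positional embedding is learned for a 64x64 token grid (1024 px inputs with 16 px patches), so other square sizes rescale it on the fly. A sketch of just the interpolation step, assuming a hypothetical 512 px input:

    import torch
    import torch.nn.functional as F

    pos_embed = torch.randn(1, 64, 64, 256)  # (1, H, W, C), learned at img_size=1024
    resized = F.interpolate(pos_embed.permute(0, 3, 1, 2), scale_factor=512 / 1024).permute(0, 2, 3, 1)
    assert resized.shape == (1, 32, 32, 256)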

@ -90,6 +90,19 @@ class SAMModel(nn.Module):
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
+def set_imgsz(self, imgsz):
+"""
+Set image size to make model compatible with different image sizes.
+Args:
+imgsz (Tuple[int, int]): The size of the input image.
+"""
+if hasattr(self.image_encoder, "set_imgsz"):
+self.image_encoder.set_imgsz(imgsz)
+self.prompt_encoder.input_image_size = imgsz
+self.prompt_encoder.image_embedding_size = [x // 16 for x in imgsz] # 16 is fixed as patch size of ViT model
+self.image_encoder.img_size = imgsz[0]
class SAM2Model(torch.nn.Module):
"""
@ -940,3 +953,14 @@ class SAM2Model(torch.nn.Module):
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks
+def set_imgsz(self, imgsz):
+"""
+Set image size to make model compatible with different image sizes.
+Args:
+imgsz (Tuple[int, int]): The size of the input image.
+"""
+self.image_size = imgsz[0]
+self.sam_prompt_encoder.input_image_size = imgsz
+self.sam_prompt_encoder.image_embedding_size = [x // 16 for x in imgsz] # fixed ViT patch size of 16

@ -982,10 +982,31 @@ class TinyViT(nn.Module):
layer = self.layers[i]
x = layer(x)
batch, _, channel = x.shape
-x = x.view(batch, 64, 64, channel)
+x = x.view(batch, self.patches_resolution[0] // 4, self.patches_resolution[1] // 4, channel)
x = x.permute(0, 3, 1, 2)
return self.neck(x)
def forward(self, x):
"""Performs the forward pass through the TinyViT model, extracting features from the input image."""
return self.forward_features(x)
+def set_imgsz(self, imgsz=[1024, 1024]):
+"""
+Set image size to make model compatible with different image sizes.
+Args:
+imgsz (Tuple[int, int]): The size of the input image.
+"""
+imgsz = [s // 4 for s in imgsz]
+self.patches_resolution = imgsz
+for i, layer in enumerate(self.layers):
+input_resolution = (
+imgsz[0] // (2 ** (i - 1 if i == 3 else i)),
+imgsz[1] // (2 ** (i - 1 if i == 3 else i)),
+)
+layer.input_resolution = input_resolution
+if layer.downsample is not None:
+layer.downsample.input_resolution = input_resolution
+if isinstance(layer, BasicLayer):
+for b in layer.blocks:
+b.input_resolution = input_resolution

@ -95,7 +95,7 @@ class Predictor(BasePredictor):
"""
if overrides is None:
overrides = {}
overrides.update(dict(task="segment", mode="predict", imgsz=1024))
overrides.update(dict(task="segment", mode="predict"))
super().__init__(cfg, overrides, _callbacks)
self.args.retina_masks = True
self.im = None
@ -455,8 +455,11 @@ class Predictor(BasePredictor):
cls = torch.arange(len(pred_masks), dtype=torch.int32, device=pred_masks.device)
pred_bboxes = torch.cat([pred_bboxes, pred_scores[:, None], cls[:, None]], dim=-1)
-masks = ops.scale_masks(masks[None].float(), orig_img.shape[:2], padding=False)[0]
-masks = masks > self.model.mask_threshold # to bool
+if len(masks) == 0:
+masks = None
+else:
+masks = ops.scale_masks(masks[None].float(), orig_img.shape[:2], padding=False)[0]
+masks = masks > self.model.mask_threshold # to bool
results.append(Results(orig_img, path=img_path, names=names, masks=masks, boxes=pred_bboxes))
# Reset segment-all mode.
self.segment_all = False
@ -522,6 +525,10 @@ class Predictor(BasePredictor):
def get_im_features(self, im):
"""Extracts image features using the SAM model's image encoder for subsequent mask prediction."""
+assert (
+isinstance(self.imgsz, (tuple, list)) and self.imgsz[0] == self.imgsz[1]
+), f"SAM models only support square image size, but got {self.imgsz}."
+self.model.set_imgsz(self.imgsz)
return self.model.image_encoder(im)
def set_prompts(self, prompts):
@ -761,6 +768,12 @@ class SAM2Predictor(Predictor):
def get_im_features(self, im):
"""Extracts image features from the SAM image encoder for subsequent processing."""
+assert (
+isinstance(self.imgsz, (tuple, list)) and self.imgsz[0] == self.imgsz[1]
+), f"SAM 2 models only support square image size, but got {self.imgsz}."
+self.model.set_imgsz(self.imgsz)
+self._bb_feat_sizes = [[x // (4 * i) for x in self.imgsz] for i in [1, 2, 4]]
backbone_out = self.model.forward_image(im)
_, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
if self.model.directly_add_no_mem_embed:
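
The _bb_feat_sizes expression just derives the backbone feature-map sizes at strides 4, 8 and 16. Worked through for the default 1024x1024 input:

    imgsz = [1024, 1024]
    bb_feat_sizes = [[x // (4 * i) for x in imgsz] for i in [1, 2, 4]]
    assert bb_feat_sizes == [[256, 256], [128, 128], [64, 64]]  # strides 4, 8, 16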

@ -672,7 +672,7 @@ class CBLinear(nn.Module):
def __init__(self, c1, c2s, k=1, s=1, p=None, g=1):
"""Initializes the CBLinear module, passing inputs unchanged."""
-super(CBLinear, self).__init__()
+super().__init__()
self.c2s = c2s
self.conv = nn.Conv2d(c1, sum(c2s), k, s, autopad(k, p), groups=g, bias=True)
@ -686,7 +686,7 @@ class CBFuse(nn.Module):
def __init__(self, idx):
"""Initializes CBFuse module with layer index for selective feature fusion."""
-super(CBFuse, self).__init__()
+super().__init__()
self.idx = idx
def forward(self, xs):

@ -144,12 +144,12 @@ class Detect(nn.Module):
(torch.Tensor): Processed predictions with shape (batch_size, min(max_det, num_anchors), 6) and last
dimension format [x, y, w, h, max_class_prob, class_index].
"""
-batch_size, anchors, predictions = preds.shape # i.e. shape(16,8400,84)
+batch_size, anchors, _ = preds.shape # i.e. shape(16,8400,84)
boxes, scores = preds.split([4, nc], dim=-1)
index = scores.amax(dim=-1).topk(min(max_det, anchors))[1].unsqueeze(-1)
boxes = boxes.gather(dim=1, index=index.repeat(1, 1, 4))
scores = scores.gather(dim=1, index=index.repeat(1, 1, nc))
-scores, index = scores.flatten(1).topk(max_det)
+scores, index = scores.flatten(1).topk(min(max_det, anchors))
i = torch.arange(batch_size)[..., None] # batch indices
return torch.cat([boxes[i, index // nc], scores[..., None], (index % nc)[..., None].float()], dim=-1)
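
The min(max_det, anchors) clamp matters because torch.topk raises a RuntimeError when k exceeds the dimension size. A minimal reproduction with made-up shapes:

    import torch

    scores = torch.rand(1, 100)  # only 100 anchors, e.g. a hypothetical small-imgsz export
    # scores.topk(300) would raise RuntimeError (selected index k out of range)
    values, idx = scores.topk(min(300, scores.shape[-1]))
    assert values.shape == (1, 100)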

@ -210,7 +210,7 @@ class ParkingManagement:
Args:
json_file (str): file that have all parking slot points
"""
with open(json_file, "r") as f:
with open(json_file) as f:
return json.load(f)
def process_data(self, json_data, im0, boxes, clss):

@ -213,7 +213,7 @@ class RF100Benchmark:
os.mkdir("ultralytics-benchmarks")
safe_download("https://github.com/ultralytics/assets/releases/download/v0.0.0/datasets_links.txt")
with open(ds_link_txt, "r") as file:
with open(ds_link_txt) as file:
for line in file:
try:
_, url, workspace, project, version = re.split("/+", line.strip())
@ -237,7 +237,7 @@ class RF100Benchmark:
Args:
path (str): YAML file path.
"""
with open(path, "r") as file:
with open(path) as file:
yaml_data = yaml.safe_load(file)
yaml_data["train"] = "train/images"
yaml_data["val"] = "valid/images"
@ -257,7 +257,7 @@ class RF100Benchmark:
skip_symbols = ["🚀", "", "💡", ""]
with open(yaml_path) as stream:
class_names = yaml.safe_load(stream)["names"]
with open(val_log_file, "r", encoding="utf-8") as f:
with open(val_log_file, encoding="utf-8") as f:
lines = f.readlines()
eval_lines = []
for line in lines:

@ -29,11 +29,13 @@ from ultralytics.utils import (
IS_PIP_PACKAGE,
LINUX,
LOGGER,
+MACOS,
ONLINE,
PYTHON_VERSION,
ROOT,
TORCHVISION_VERSION,
USER_CONFIG_DIR,
+WINDOWS,
Retry,
SimpleNamespace,
ThreadingLocked,
@ -224,6 +226,14 @@ def check_version(
if not required: # if required is '' or None
return True
if "sys_platform" in required: # i.e. required='<2.4.0,>=1.8.0; sys_platform == "win32"'
if (
(WINDOWS and "win32" not in required)
or (LINUX and "linux" not in required)
or (MACOS and "macos" not in required and "darwin" not in required)
):
return True
op = ""
version = ""
result = True
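
In other words, a requirement whose environment marker targets a different platform is treated as trivially satisfied. A standalone sketch of that early return (pretend platform flags, not the full check_version() logic):

    required = "<2.4.0,>=1.8.0; sys_platform == 'win32'"
    WINDOWS, LINUX, MACOS = False, True, False  # pretend we are on Linux
    if "sys_platform" in required and (
        (WINDOWS and "win32" not in required)
        or (LINUX and "linux" not in required)
        or (MACOS and "macos" not in required and "darwin" not in required)
    ):
        print("marker targets another platform -> version check passes")  # taken on Linux
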
@ -422,6 +432,7 @@ def check_torchvision():
"""
# Compatibility table
compatibility_table = {
"2.4": ["0.19"],
"2.3": ["0.18"],
"2.2": ["0.17"],
"2.1": ["0.16"],

@ -460,7 +460,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names={}, on_plot=N
else:
ax.plot(px, py, linewidth=1, color="grey") # plot(recall, precision)
-ax.plot(px, py.mean(1), linewidth=3, color="blue", label="all classes %.3f mAP@0.5" % ap[:, 0].mean())
+ax.plot(px, py.mean(1), linewidth=3, color="blue", label=f"all classes {ap[:, 0].mean():.3f} mAP@0.5")
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
ax.set_xlim(0, 1)

@ -218,7 +218,7 @@ def non_max_suppression(
classes = torch.tensor(classes, device=prediction.device)
if prediction.shape[-1] == 6: # end-to-end model (BNC, i.e. 1,300,6)
-output = [pred[pred[:, 4] > conf_thres] for pred in prediction]
+output = [pred[pred[:, 4] > conf_thres][:max_det] for pred in prediction]
if classes is not None:
output = [pred[(pred[:, 5:6] == classes).any(1)] for pred in output]
return output
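
For end-to-end models the candidates are typically already confidence-sorted, so a plain slice caps the output at max_det after thresholding. Illustrative only, with made-up boxes:

    import torch

    pred = torch.tensor([[0, 0, 10, 10, 0.9, 0], [0, 0, 9, 9, 0.6, 0], [0, 0, 8, 8, 0.2, 0]])
    conf_thres, max_det = 0.25, 1
    kept = pred[pred[:, 4] > conf_thres][:max_det]  # threshold filter, then cap
    assert kept.shape == (1, 6)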

@ -1,4 +1,5 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
+import contextlib
import gc
import math
@ -24,6 +25,7 @@ from ultralytics.utils import (
NUM_THREADS,
PYTHON_VERSION,
TORCHVISION_VERSION,
+WINDOWS,
__version__,
colorstr,
)
@ -42,6 +44,11 @@ TORCHVISION_0_10 = check_version(TORCHVISION_VERSION, "0.10.0")
TORCHVISION_0_11 = check_version(TORCHVISION_VERSION, "0.11.0")
TORCHVISION_0_13 = check_version(TORCHVISION_VERSION, "0.13.0")
TORCHVISION_0_18 = check_version(TORCHVISION_VERSION, "0.18.0")
+if WINDOWS and torch.__version__[:3] == "2.4": # reject all versions of 2.4 on Windows
+LOGGER.warning(
+"WARNING ⚠ Known issue with torch>=2.4.0 on Windows with CPU, recommend downgrading to torch<=2.3.1 to resolve "
+"https://github.com/ultralytics/ultralytics/issues/15049"
+)
@contextmanager
