`ultralytics 8.0.190` add `thop>=0.1.1` to requirements.txt (#5162)

Co-authored-by: JohanWesto <Johan.westo@gmail.com>
Co-authored-by: Muhammad Rizwan Munawar <62513924+RizwanMunawar@users.noreply.github.com>
Co-authored-by: StephenBeirlaen <11806615+StephenBeirlaen@users.noreply.github.com>
pull/4998/head^2 v8.0.190
Glenn Jocher 1 year ago committed by GitHub
parent 092b58a8cf
commit 9aaa5d5ed0
Files changed:
  1. docs/modes/predict.md (7 changed lines)
  2. docs/modes/track.md (9 changed lines)
  3. requirements.txt (2 changed lines)
  4. ultralytics/__init__.py (2 changed lines)
  5. ultralytics/data/augment.py (14 changed lines)
  6. ultralytics/data/dataset.py (2 changed lines)
  7. ultralytics/engine/exporter.py (3 changed lines)

@@ -23,6 +23,13 @@ In the world of machine learning and computer vision, the process of making sens
   <strong>Watch:</strong> How to Extract the Outputs from Ultralytics YOLOv8 Model for Custom Projects.
 </p>
 
+## Real-world Applications
+
+| Manufacturing | Sports | Safety |
+|:-----------------------------------:|:-----------------------:|:-----------:|
+| ![Vehicle Spare Parts Detection](https://github.com/RizwanMunawar/ultralytics/assets/62513924/a0f802a8-0776-44cf-8f17-93974a4a28a1) | ![Football Player Detection](https://github.com/RizwanMunawar/ultralytics/assets/62513924/7d320e1f-fc57-4d7f-a691-78ee579c3442)| ![People Fall Detection](https://github.com/RizwanMunawar/ultralytics/assets/62513924/86437c4a-3227-4eee-90ef-9efb697bdb43) |
+| Vehicle Spare Parts Detection | Football Player Detection | People Fall Detection |
+
 ## Why Use Ultralytics YOLO for Inference?
 
 Here's why you should consider YOLOv8's predict mode for your various inference needs:

@@ -30,6 +30,13 @@ The output from Ultralytics trackers is consistent with standard object detectio
   <strong>Watch:</strong> Object Detection and Tracking with Ultralytics YOLOv8.
 </p>
 
+## Real-world Applications
+
+| Transportation | Retail | Aquaculture |
+|:-----------------------------------:|:-----------------------:|:-----------:|
+| ![Vehicle Tracking](https://github.com/RizwanMunawar/ultralytics/assets/62513924/ee6e6038-383b-4f21-ac29-b2a1c7d386ab) | ![People Tracking](https://github.com/RizwanMunawar/ultralytics/assets/62513924/93bb4ee2-77a0-4e4e-8eb6-eb8f527f0527) | ![Fish Tracking](https://github.com/RizwanMunawar/ultralytics/assets/62513924/a5146d0f-bfa8-4e0a-b7df-3c1446cd8142) |
+| Vehicle Tracking | People Tracking | Fish Tracking |
+
 ## Features at a Glance
 
 Ultralytics YOLO extends its object detection features to provide robust and versatile object tracking:
@@ -321,4 +328,4 @@ By contributing to this section, you help expand the scope of tracking solutions
 
 To initiate your contribution, please refer to our [Contributing Guide](https://docs.ultralytics.com/help/contributing) for comprehensive instructions on submitting a Pull Request (PR) 🛠. We are excited to see what you bring to the table!
 
-Together, let's enhance the tracking capabilities of the Ultralytics YOLO ecosystem 🙏!
+Together, let's enhance the tracking capabilities of the Ultralytics YOLO ecosystem 🙏!

@@ -38,7 +38,7 @@ seaborn>=0.11.0
 # Extras --------------------------------------
 psutil # system utilization
 py-cpuinfo # display CPU info
-# thop>=0.1.1 # FLOPs computation
+thop>=0.1.1 # FLOPs computation
 # ipython # interactive notebook
 # albumentations>=1.0.3 # training augmentations
 # pycocotools>=2.0.6 # COCO mAP
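
Context for the requirement change: `thop` is the package Ultralytics relies on for FLOPs reporting, so un-commenting it makes that metric available by default. A minimal, hedged sketch of typical `thop` usage on a throwaway PyTorch module (the layer sizes below are arbitrary and not from this repository):

```python
import torch
import torch.nn as nn
from thop import profile  # thop>=0.1.1, now installed by default

# Arbitrary toy model, used only to illustrate the FLOPs computation
model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 32, 3, padding=1),
)
x = torch.zeros(1, 3, 224, 224)  # dummy input at a typical inference size

macs, params = profile(model, inputs=(x,), verbose=False)  # thop counts multiply-accumulates
print(f'{macs * 2 / 1e9:.2f} GFLOPs, {params / 1e6:.2f}M parameters')  # FLOPs ≈ 2 × MACs
```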

@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = '8.0.189'
+__version__ = '8.0.190'
 
 from ultralytics.models import RTDETR, SAM, YOLO
 from ultralytics.models.fastsam import FastSAM

@@ -791,14 +791,14 @@ def v8_transforms(dataset, imgsz, hyp, stretch=False):
 
 
 # Classification augmentations -----------------------------------------------------------------------------------------
-def classify_transforms(size=224, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)):  # IMAGENET_MEAN, IMAGENET_STD
-    # Transforms to apply if albumentations not installed
+def classify_transforms(size=224, rect=False, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)):  # IMAGENET_MEAN, IMAGENET_STD
+    """Transforms to apply if albumentations not installed."""
     if not isinstance(size, int):
         raise TypeError(f'classify_transforms() size {size} must be integer, not (list, tuple)')
+    transforms = [ClassifyLetterBox(size, auto=True) if rect else CenterCrop(size), ToTensor()]
     if any(mean) or any(std):
-        return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(mean, std, inplace=True)])
-    else:
-        return T.Compose([CenterCrop(size), ToTensor()])
+        transforms.append(T.Normalize(mean, std, inplace=True))
+    return T.Compose(transforms)
 
 
 def hsv2colorjitter(h, s, v):
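
A quick, hedged illustration of what the new `rect` flag changes (assumes `ultralytics==8.0.190` and OpenCV are installed; `bus.jpg` is a placeholder path, not a file shipped with this commit): `rect=False` keeps the previous CenterCrop pipeline, while `rect=True` swaps in the stride-aligned `ClassifyLetterBox`.

```python
import cv2
from ultralytics.data.augment import classify_transforms

im = cv2.imread('bus.jpg')  # placeholder: any HWC BGR numpy image

square = classify_transforms(size=224)                 # previous behaviour: CenterCrop(224) -> ToTensor
letterbox = classify_transforms(size=224, rect=True)   # new: ClassifyLetterBox(224, auto=True) -> ToTensor

print(square(im).shape)     # always torch.Size([3, 224, 224])
print(letterbox(im).shape)  # stride-multiple H x W that depends on the image's aspect ratio
```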
@@ -864,9 +864,9 @@ class ClassifyLetterBox:
         imh, imw = im.shape[:2]
         r = min(self.h / imh, self.w / imw)  # ratio of new/old
         h, w = round(imh * r), round(imw * r)  # resized image
-        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w
+        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
         top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
-        im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)
+        im_out = np.full((hs, ws, 3), 114, dtype=im.dtype)
         im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
         return im_out
 
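
Why the two one-line fixes above matter: without the parentheses, `hs, ws = (...) if self.auto else self.h, self.w` parses as `hs = ((...) if self.auto else self.h); ws = self.w`, so `hs` becomes a generator object whenever `auto=True`; and the padded canvas was always allocated at the requested `(self.h, self.w)` rather than the stride-aligned `(hs, ws)`. A small arithmetic sketch of the corrected behaviour (the 480×640 input size is just an example):

```python
import math

stride, target = 32, 224   # ClassifyLetterBox defaults when auto=True
imh, imw = 480, 640        # example input image size (H, W)

r = min(target / imh, target / imw)                               # 0.35, ratio of new/old
h, w = round(imh * r), round(imw * r)                             # 168, 224 -> resized image
hs, ws = (math.ceil(x / stride) * stride for x in (h, w))         # 192, 224 -> stride-aligned canvas
top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)  # 12, 0 -> padding offsets

print(hs, ws, top, left)  # the output canvas is now (192, 224, 3), not a fixed (224, 224, 3)
```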

@@ -222,7 +222,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder):
         self.cache_disk = cache == 'disk'
         self.samples = self.verify_images()  # filter out bad images
         self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples]  # file, index, npy, im
-        self.torch_transforms = classify_transforms(args.imgsz)
+        self.torch_transforms = classify_transforms(args.imgsz, rect=args.rect)
         self.album_transforms = classify_albumentations(
             augment=augment,
             size=args.imgsz,

@@ -627,6 +627,9 @@ class Exporter:
         if builder.platform_has_fast_fp16 and self.args.half:
             config.set_flag(trt.BuilderFlag.FP16)
 
+        del self.model
+        torch.cuda.empty_cache()
+
         # Write file
         with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
             # Metadata
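
The added lines free the PyTorch copy of the model before TensorRT starts building, so the builder has more GPU memory to work with. A generic, hedged sketch of that release pattern outside the exporter (the `Linear` layer is an arbitrary stand-in for the model, not code from this repository):

```python
import gc
import torch

if torch.cuda.is_available():
    model = torch.nn.Linear(8192, 8192).cuda()   # arbitrary stand-in for self.model
    print(f'reserved before: {torch.cuda.memory_reserved() / 1e6:.1f} MB')

    del model                  # drop the last Python reference to the weights
    gc.collect()               # make sure the tensor storages are actually collected
    torch.cuda.empty_cache()   # hand cached blocks back to the driver so other
                               # consumers (e.g. the TensorRT builder) can use them
    print(f'reserved after:  {torch.cuda.memory_reserved() / 1e6:.1f} MB')
```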
