Fix region-counting indents (#17835)

Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Glenn Jocher authored 6 days ago, committed by GitHub
parent 55b2137f18
commit 386a3b7625
Files changed (4):

1. docs/en/guides/region-counting.md (79 changes)
2. ultralytics/cfg/__init__.py (1 change)
3. ultralytics/models/yolo/detect/train.py (2 changes)
4. ultralytics/utils/torch_utils.py (16 changes)

docs/en/guides/region-counting.md
@@ -4,7 +4,7 @@ description: Learn how to use Ultralytics YOLOv8 for precise object counting in
 keywords: object counting, regions, YOLOv8, computer vision, Ultralytics, efficiency, accuracy, automation, real-time, applications, surveillance, monitoring
 ---
 
-# Object Counting in Different Regions using Ultralytics YOLOv8 🚀
+# Object Counting in Different Regions using Ultralytics YOLO 🚀
 
 ## What is Object Counting in Regions?
@@ -39,44 +39,45 @@ keywords: object counting, regions, YOLOv8, computer vision, Ultralytics, effici
     === "Python"
 
         ```python
         import cv2
 
         from ultralytics import solutions
 
         cap = cv2.VideoCapture("Path/to/video/file.mp4")
         assert cap.isOpened(), "Error reading video file"
         w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
 
         # Define region points
         # region_points = [(20, 400), (1080, 400), (1080, 360), (20, 360)]  # Pass region as list
 
         # pass region as dictionary
         region_points = {
             "region-01": [(50, 50), (250, 50), (250, 250), (50, 250)],
-            "region-02": [(640, 640), (780, 640), (780, 720), (640, 720)]
+            "region-02": [(640, 640), (780, 640), (780, 720), (640, 720)],
         }
 
         # Video writer
         video_writer = cv2.VideoWriter("region_counting.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
 
         # Init Object Counter
         region = solutions.RegionCounter(
             show=True,
             region=region_points,
             model="yolo11n.pt",
         )
 
         # Process video
         while cap.isOpened():
             success, im0 = cap.read()
             if not success:
                 print("Video frame is empty or video processing has been successfully completed.")
                 break
             im0 = region.count(im0)
             video_writer.write(im0)
 
         cap.release()
         video_writer.release()
         cv2.destroyAllWindows()
         ```
 
 !!! tip "Ultralytics Example Code"

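As the commented `# Pass region as list` line in the snippet shows, `RegionCounter` also accepts a single region given as a plain list of points. A minimal sketch under that assumption, reusing the documented `count()` call; the video path and the decision to skip writing an output file are chosen here only for illustration:

```python
import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("Path/to/video/file.mp4")  # illustrative path
assert cap.isOpened(), "Error reading video file"

# Single rectangular region passed as a list of (x, y) points (the commented-out form above)
region_points = [(20, 400), (1080, 400), (1080, 360), (20, 360)]

counter = solutions.RegionCounter(show=True, region=region_points, model="yolo11n.pt")

while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    im0 = counter.count(im0)  # returns the annotated frame

cap.release()
cv2.destroyAllWindows()
```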
ultralytics/cfg/__init__.py
@@ -11,7 +11,6 @@ import cv2
 from ultralytics.utils import (
     ASSETS,
-    ASSETS_URL,
     DEFAULT_CFG,
     DEFAULT_CFG_DICT,
     DEFAULT_CFG_PATH,

ultralytics/models/yolo/detect/train.py
@@ -146,5 +146,5 @@ class DetectionTrainer(BaseTrainer):
         """Get batch size by calculating memory occupation of model."""
         train_dataset = self.build_dataset(self.trainset, mode="train", batch=16)
         # 4 for mosaic augmentation
-        max_num_obj = max(len(l["cls"]) for l in train_dataset.labels) * 4
+        max_num_obj = max(len(label["cls"]) for label in train_dataset.labels) * 4
         return super().auto_batch(max_num_obj)

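The only change here is renaming the loop variable `l` to `label` (single-letter `l` is easy to misread and is flagged by common lint rules such as E741); the computed value is identical. A toy sketch of the expression, assuming each entry of `labels` is a dict whose `"cls"` array holds one class id per object, as in the trainer's dataset:

```python
import numpy as np

# Dummy per-image labels shaped like the dataset's `labels` list (assumption for illustration)
labels = [
    {"cls": np.zeros(3)},  # image with 3 objects
    {"cls": np.zeros(7)},  # image with 7 objects
    {"cls": np.zeros(5)},  # image with 5 objects
]

# Mosaic can stitch 4 images together, hence the "* 4" worst-case multiplier
max_num_obj = max(len(label["cls"]) for label in labels) * 4
print(max_num_obj)  # 28
```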
ultralytics/utils/torch_utils.py
@@ -301,28 +301,22 @@ def fuse_deconv_and_bn(deconv, bn):
 def model_info(model, detailed=False, verbose=True, imgsz=640):
-    """
-    Model information.
-
-    imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320].
-    """
+    """Print and return detailed model information layer by layer."""
     if not verbose:
         return
     n_p = get_num_params(model)  # number of parameters
     n_g = get_num_gradients(model)  # number of gradients
     n_l = len(list(model.modules()))  # number of layers
     if detailed:
-        LOGGER.info(
-            f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}"
-        )
+        LOGGER.info(f"{'layer':>5}{'name':>40}{'gradient':>10}{'parameters':>12}{'shape':>20}{'mu':>10}{'sigma':>10}")
         for i, (name, p) in enumerate(model.named_parameters()):
             name = name.replace("module_list.", "")
             LOGGER.info(
-                "%5g %40s %9s %12g %20s %10.3g %10.3g %10s"
-                % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std(), p.dtype)
+                f"{i:>5g}{name:>40s}{p.requires_grad!r:>10}{p.numel():>12g}{str(list(p.shape)):>20s}"
+                f"{p.mean():>10.3g}{p.std():>10.3g}{str(p.dtype):>15s}"
             )
-    flops = get_flops(model, imgsz)
+    flops = get_flops(model, imgsz)  # imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320]
     fused = " (fused)" if getattr(model, "is_fused", lambda: False)() else ""
     fs = f", {flops:.1f} GFLOPs" if flops else ""
     yaml_file = getattr(model, "yaml_file", "") or getattr(model, "yaml", {}).get("yaml_file", "")

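The logging refactor replaces printf-style `%` formatting with f-strings that use the same right-aligned, fixed-width column specs (and widens the `gradient` and dtype columns). A standalone sketch of those format specs with made-up values, using `print` instead of `LOGGER` so it runs without Ultralytics installed:

```python
# Header row: each field right-aligned into a fixed-width column
print(f"{'layer':>5}{'name':>40}{'gradient':>10}{'parameters':>12}{'shape':>20}{'mu':>10}{'sigma':>10}")

# One fake parameter row using the same width/precision specs as the refactored call
i, name, requires_grad, numel = 0, "model.0.conv.weight", True, 464
shape, mean, std, dtype = [16, 3, 3, 3], 0.00231, 0.0612, "torch.float32"
print(
    f"{i:>5g}{name:>40s}{requires_grad!r:>10}{numel:>12g}{str(shape):>20s}"
    f"{mean:>10.3g}{std:>10.3g}{str(dtype):>15s}"
)
```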