diff --git a/docs/en/guides/region-counting.md b/docs/en/guides/region-counting.md
index 94120bcab6..d1c439fa2f 100644
--- a/docs/en/guides/region-counting.md
+++ b/docs/en/guides/region-counting.md
@@ -4,7 +4,7 @@ description: Learn how to use Ultralytics YOLOv8 for precise object counting in
 keywords: object counting, regions, YOLOv8, computer vision, Ultralytics, efficiency, accuracy, automation, real-time, applications, surveillance, monitoring
 ---

-# Object Counting in Different Regions using Ultralytics YOLOv8 🚀
+# Object Counting in Different Regions using Ultralytics YOLO 🚀

 ## What is Object Counting in Regions?

@@ -39,44 +39,45 @@ keywords: object counting, regions, YOLOv8, computer vision, Ultralytics, effici
 === "Python"

     ```python
-    import cv2
-    from ultralytics import solutions
-
-    cap = cv2.VideoCapture("Path/to/video/file.mp4")
-    assert cap.isOpened(), "Error reading video file"
-    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
-
-    # Define region points
-    # region_points = [(20, 400), (1080, 400), (1080, 360), (20, 360)]  # Pass region as list
-
-    # pass region as dictionary
-    region_points = {
-        "region-01": [(50, 50), (250, 50), (250, 250), (50, 250)],
-        "region-02": [(640, 640), (780, 640), (780, 720), (640, 720)]
-    }
-
-    # Video writer
-    video_writer = cv2.VideoWriter("region_counting.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
-
-    # Init Object Counter
-    region = solutions.RegionCounter(
-        show=True,
-        region=region_points,
-        model="yolo11n.pt",
-    )
-
-    # Process video
-    while cap.isOpened():
-        success, im0 = cap.read()
-        if not success:
-            print("Video frame is empty or video processing has been successfully completed.")
-            break
-        im0 = region.count(im0)
-        video_writer.write(im0)
-
-    cap.release()
-    video_writer.release()
-    cv2.destroyAllWindows()
+    import cv2
+
+    from ultralytics import solutions
+
+    cap = cv2.VideoCapture("Path/to/video/file.mp4")
+    assert cap.isOpened(), "Error reading video file"
+    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
+
+    # Define region points
+    # region_points = [(20, 400), (1080, 400), (1080, 360), (20, 360)]  # Pass region as list
+
+    # pass region as dictionary
+    region_points = {
+        "region-01": [(50, 50), (250, 50), (250, 250), (50, 250)],
+        "region-02": [(640, 640), (780, 640), (780, 720), (640, 720)],
+    }
+
+    # Video writer
+    video_writer = cv2.VideoWriter("region_counting.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
+
+    # Init Object Counter
+    region = solutions.RegionCounter(
+        show=True,
+        region=region_points,
+        model="yolo11n.pt",
+    )
+
+    # Process video
+    while cap.isOpened():
+        success, im0 = cap.read()
+        if not success:
+            print("Video frame is empty or video processing has been successfully completed.")
+            break
+        im0 = region.count(im0)
+        video_writer.write(im0)
+
+    cap.release()
+    video_writer.release()
+    cv2.destroyAllWindows()
     ```

 !!! tip "Ultralytics Example Code"
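The docs hunk above switches the example to passing `region` as a dict that maps each region name to its polygon vertices, with the single-region list form kept as a commented-out alternative. As a quick way to sanity-check such coordinates before counting, the same structure can be drawn with plain OpenCV; this is an illustrative sketch only, not part of the diff and not `RegionCounter` internals, and `frame` plus the output filename are hypothetical:

```python
import cv2
import numpy as np

# Same dict-of-regions shape the docs example feeds to solutions.RegionCounter
region_points = {
    "region-01": [(50, 50), (250, 50), (250, 250), (50, 250)],
    "region-02": [(640, 640), (780, 640), (780, 720), (640, 720)],
}

frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # hypothetical blank stand-in for a video frame
for name, points in region_points.items():
    pts = np.array(points, dtype=np.int32).reshape(-1, 1, 2)  # polygon as an (N, 1, 2) int array
    cv2.polylines(frame, [pts], isClosed=True, color=(0, 255, 0), thickness=2)
    cv2.putText(frame, name, points[0], cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
cv2.imwrite("regions_preview.png", frame)  # inspect the drawn regions offline
```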
tip "Ultralytics Example Code" diff --git a/ultralytics/cfg/__init__.py b/ultralytics/cfg/__init__.py index de9ef96a17..5f0222a71f 100644 --- a/ultralytics/cfg/__init__.py +++ b/ultralytics/cfg/__init__.py @@ -11,7 +11,6 @@ import cv2 from ultralytics.utils import ( ASSETS, - ASSETS_URL, DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_PATH, diff --git a/ultralytics/models/yolo/detect/train.py b/ultralytics/models/yolo/detect/train.py index 606b9fb92b..4a32acec1e 100644 --- a/ultralytics/models/yolo/detect/train.py +++ b/ultralytics/models/yolo/detect/train.py @@ -146,5 +146,5 @@ class DetectionTrainer(BaseTrainer): """Get batch size by calculating memory occupation of model.""" train_dataset = self.build_dataset(self.trainset, mode="train", batch=16) # 4 for mosaic augmentation - max_num_obj = max(len(l["cls"]) for l in train_dataset.labels) * 4 + max_num_obj = max(len(label["cls"]) for label in train_dataset.labels) * 4 return super().auto_batch(max_num_obj) diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py index b413297be0..a6e7447629 100644 --- a/ultralytics/utils/torch_utils.py +++ b/ultralytics/utils/torch_utils.py @@ -301,28 +301,22 @@ def fuse_deconv_and_bn(deconv, bn): def model_info(model, detailed=False, verbose=True, imgsz=640): - """ - Model information. - - imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320]. - """ + """Print and return detailed model information layer by layer.""" if not verbose: return n_p = get_num_params(model) # number of parameters n_g = get_num_gradients(model) # number of gradients n_l = len(list(model.modules())) # number of layers if detailed: - LOGGER.info( - f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}" - ) + LOGGER.info(f"{'layer':>5}{'name':>40}{'gradient':>10}{'parameters':>12}{'shape':>20}{'mu':>10}{'sigma':>10}") for i, (name, p) in enumerate(model.named_parameters()): name = name.replace("module_list.", "") LOGGER.info( - "%5g %40s %9s %12g %20s %10.3g %10.3g %10s" - % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std(), p.dtype) + f"{i:>5g}{name:>40s}{p.requires_grad!r:>10}{p.numel():>12g}{str(list(p.shape)):>20s}" + f"{p.mean():>10.3g}{p.std():>10.3g}{str(p.dtype):>15s}" ) - flops = get_flops(model, imgsz) + flops = get_flops(model, imgsz) # imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320] fused = " (fused)" if getattr(model, "is_fused", lambda: False)() else "" fs = f", {flops:.1f} GFLOPs" if flops else "" yaml_file = getattr(model, "yaml_file", "") or getattr(model, "yaml", {}).get("yaml_file", "")