Update notebooks (#17065)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
pull/17076/head^2
Muhammad Rizwan Munawar 1 month ago committed by GitHub
parent b9747791df
commit 71624018e2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 17
      examples/heatmaps.ipynb
  2. 22
      examples/object_counting.ipynb

@@ -96,10 +96,7 @@
"source": [
"import cv2\n",
"\n",
"from ultralytics import YOLO, solutions\n",
"\n",
"# Load YOLO model\n",
"model = YOLO(\"yolo11n.pt\")\n",
"from ultralytics import solutions\n",
"\n",
"# Open video file\n",
"cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
@@ -113,10 +110,9 @@
"\n",
"# Initialize heatmap object\n",
"heatmap_obj = solutions.Heatmap(\n",
" colormap=cv2.COLORMAP_PARULA,\n",
" view_img=True,\n",
" shape=\"circle\",\n",
" names=model.names,\n",
" colormap=cv2.COLORMAP_PARULA, # Color of the heatmap\n",
" show=True, # Display the image during processing\n",
" model=\"yolo11n.pt\", # Ultralytics YOLO11 model file\n",
")\n",
"\n",
"while cap.isOpened():\n",
@@ -125,11 +121,8 @@
" print(\"Video frame is empty or video processing has been successfully completed.\")\n",
" break\n",
"\n",
" # Perform tracking on the current frame\n",
" tracks = model.track(im0, persist=True, show=False)\n",
"\n",
" # Generate heatmap on the frame\n",
" im0 = heatmap_obj.generate_heatmap(im0, tracks)\n",
" im0 = heatmap_obj.generate_heatmap(im0)\n",
"\n",
" # Write the frame to the output video\n",
" video_writer.write(im0)\n",

@@ -104,10 +104,7 @@
"source": [
"import cv2\n",
"\n",
"from ultralytics import YOLO, solutions\n",
"\n",
"# Load the pre-trained YOLO11 model\n",
"model = YOLO(\"yolo11n.pt\")\n",
"from ultralytics import solutions\n",
"\n",
"# Open the video file\n",
"cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
@@ -119,19 +116,15 @@
"# Define points for a line or region of interest in the video frame\n",
"line_points = [(20, 400), (1080, 400)] # Line coordinates\n",
"\n",
"# Specify classes to count, for example: person (0) and car (2)\n",
"classes_to_count = [0, 2] # Class IDs for person and car\n",
"\n",
"# Initialize the video writer to save the output video\n",
"video_writer = cv2.VideoWriter(\"object_counting_output.avi\", cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (w, h))\n",
"\n",
"# Initialize the Object Counter with visualization options and other parameters\n",
"counter = solutions.ObjectCounter(\n",
" view_img=True, # Display the image during processing\n",
" reg_pts=line_points, # Region of interest points\n",
" names=model.names, # Class names from the YOLO model\n",
" draw_tracks=True, # Draw tracking lines for objects\n",
" line_thickness=2, # Thickness of the lines drawn\n",
" show=True, # Display the image during processing\n",
" region=line_points, # Region of interest points\n",
" model=\"yolo11n.pt\", # Ultralytics YOLO11 model file\n",
" line_width=2, # Thickness of the lines and bounding boxes\n",
")\n",
"\n",
"# Process video frames in a loop\n",
@@ -141,11 +134,8 @@
" print(\"Video frame is empty or video processing has been successfully completed.\")\n",
" break\n",
"\n",
" # Perform object tracking on the current frame, filtering by specified classes\n",
" tracks = model.track(im0, persist=True, show=False, classes=classes_to_count)\n",
"\n",
" # Use the Object Counter to count objects in the frame and get the annotated image\n",
" im0 = counter.start_counting(im0, tracks)\n",
" im0 = counter.count(im0)\n",
"\n",
" # Write the annotated frame to the output video\n",
" video_writer.write(im0)\n",

Loading…
Cancel
Save