`ultralytics 8.0.237` `cv2.CAP_PROP` fix and `in_counts` and `out_counts` displays (#7380)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: ayush chaurasia <ayush.chaurarsia@gmail.com>
Co-authored-by: Muhammad Rizwan Munawar <chr043416@gmail.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com>
Refs: pull/7404/head, tag v8.0.237
Authored by Glenn Jocher, committed by GitHub 11 months ago
Parent: 71fe5e919d
Commit: 8c2b2f56b2
15 changed files:

1. .github/workflows/links.yml (2 lines changed)
2. docs/en/guides/distance-calculation.md (5 lines changed)
3. docs/en/guides/heatmaps.md (46 lines changed)
4. docs/en/guides/instance-segmentation-and-tracking.md (10 lines changed)
5. docs/en/guides/object-counting.md (17 lines changed)
6. docs/en/guides/speed-estimation.md (5 lines changed)
7. docs/en/guides/vision-eye.md (12 lines changed)
8. docs/en/guides/workouts-monitoring.md (6 lines changed)
9. examples/heatmaps.ipynb (8 lines changed)
10. examples/object_counting.ipynb (6 lines changed)
11. examples/object_tracking.ipynb (15 lines changed)
12. ultralytics/__init__.py (2 lines changed)
13. ultralytics/solutions/heatmap.py (2 lines changed)
14. ultralytics/solutions/object_counter.py (30 lines changed)
15. ultralytics/utils/plotting.py (32 lines changed)

@@ -47,7 +47,6 @@ jobs:
  --exclude-path docs/ko \
  --exclude-path docs/hi \
  --exclude-path docs/ar \
- --exclude-mail \
  --github-token ${{ secrets.GITHUB_TOKEN }} \
  './**/*.md' './**/*.html'
@@ -73,6 +72,5 @@ jobs:
  --exclude-path docs/ko \
  --exclude-path docs/hi \
  --exclude-path docs/ar \
- --exclude-mail \
  --github-token ${{ secrets.GITHUB_TOKEN }} \
  './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb'

@@ -33,12 +33,13 @@ Measuring the gap between two objects is known as distance calculation within a
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  # Video writer
  video_writer = cv2.VideoWriter("distance_calculation.avi",
  cv2.VideoWriter_fourcc(*'mp4v'),
- int(cap.get(5)),
- (int(cap.get(3)), int(cap.get(4))))
+ fps,
+ (w, h))
  # Init distance-calculation obj
  dist_obj = distance_calculation.DistanceCalculation()
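Note: this is the pattern the commit applies across every guide below. As a standalone sketch (the video path is a placeholder), the opaque indices 3, 4 and 5 map to named OpenCV properties:

    import cv2

    cap = cv2.VideoCapture("path/to/video/file.mp4")  # placeholder path
    assert cap.isOpened(), "Error reading video file"

    # cap.get(3) == CAP_PROP_FRAME_WIDTH, cap.get(4) == CAP_PROP_FRAME_HEIGHT,
    # cap.get(5) == CAP_PROP_FPS; the named constants make the intent explicit.
    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

    # VideoWriter takes (filename, fourcc, fps, (width, height)) in that order.
    video_writer = cv2.VideoWriter("distance_calculation.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))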

@@ -50,18 +50,19 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
  model = YOLO("yolov8n.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  # Video writer
  video_writer = cv2.VideoWriter("heatmap_output.avi",
  cv2.VideoWriter_fourcc(*'mp4v'),
- int(cap.get(5)),
- (int(cap.get(3)), int(cap.get(4))))
+ fps,
+ (w, h))
  # Init heatmap
  heatmap_obj = heatmap.Heatmap()
- heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA ,
- imw=cap.get(4), # should same as cap height
- imh=cap.get(3), # should same as cap width
+ heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA,
+ imw=w,
+ imh=h,
  view_img=True,
  shape="circle")
@@ -90,20 +91,21 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
  model = YOLO("yolov8n.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  # Video writer
  video_writer = cv2.VideoWriter("heatmap_output.avi",
  cv2.VideoWriter_fourcc(*'mp4v'),
- int(cap.get(5)),
- (int(cap.get(3)), int(cap.get(4))))
+ fps,
+ (w, h))
  line_points = [(256, 409), (694, 532)] # line for object counting
  # Init heatmap
  heatmap_obj = heatmap.Heatmap()
- heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA ,
- imw=cap.get(4), # should same as cap height
- imh=cap.get(3), # should same as cap width
+ heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA,
+ imw=w,
+ imh=h,
  view_img=True,
  shape="circle",
  count_reg_pts=line_points)
@@ -132,21 +134,22 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
  model = YOLO("yolov8n.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  # Video writer
  video_writer = cv2.VideoWriter("heatmap_output.avi",
  cv2.VideoWriter_fourcc(*'mp4v'),
- int(cap.get(5)),
- (int(cap.get(3)), int(cap.get(4))))
+ fps,
+ (w, h))
  # Define region points
  region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
  # Init heatmap
  heatmap_obj = heatmap.Heatmap()
- heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA ,
- imw=cap.get(4), # should same as cap height
- imh=cap.get(3), # should same as cap width
+ heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA,
+ imw=w,
+ imh=h,
  view_img=True,
  shape="circle",
  count_reg_pts=region_points)
@@ -178,7 +181,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
  # Heatmap Init
  heatmap_obj = heatmap.Heatmap()
- heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA ,
+ heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA,
  imw=im0.shape[0], # should same as im0 width
  imh=im0.shape[1], # should same as im0 height
  view_img=True,
@@ -199,20 +202,21 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
  model = YOLO("yolov8n.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  # Video writer
  video_writer = cv2.VideoWriter("heatmap_output.avi",
  cv2.VideoWriter_fourcc(*'mp4v'),
- int(cap.get(5)),
- (int(cap.get(3)), int(cap.get(4))))
+ fps,
+ (w, h))
  classes_for_heatmap = [0, 2] # classes for heatmap
  # Init heatmap
  heatmap_obj = heatmap.Heatmap()
- heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA ,
- imw=cap.get(4), # should same as cap height
- imh=cap.get(3), # should same as cap width
+ heatmap_obj.set_args(colormap=cv2.COLORMAP_PARULA,
+ imw=w,
+ imh=h,
  view_img=True,
  shape="circle")

@@ -34,10 +34,9 @@ There are two types of instance segmentation tracking available in the Ultralyti
  model = YOLO("yolov8n-seg.pt")
  names = model.model.names
  cap = cv2.VideoCapture("path/to/video/file.mp4")
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
- out = cv2.VideoWriter('instance-segmentation.avi',
- cv2.VideoWriter_fourcc(*'MJPG'),
- 30, (int(cap.get(3)), int(cap.get(4))))
+ out = cv2.VideoWriter('instance-segmentation.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h))
  while True:
  ret, im0 = cap.read()
@@ -80,10 +79,9 @@ There are two types of instance segmentation tracking available in the Ultralyti
  model = YOLO("yolov8n-seg.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
- out = cv2.VideoWriter('instance-segmentation-object-tracking.avi',
- cv2.VideoWriter_fourcc(*'MJPG'),
- 30, (int(cap.get(3)), int(cap.get(4))))
+ out = cv2.VideoWriter('instance-segmentation-object-tracking.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h))
  while True:
  ret, im0 = cap.read()
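All of these guides wrap the fixed writer in the same read-process-write loop. A minimal self-contained skeleton, with the model/annotation step elided and the output filename chosen here only for illustration:

    import cv2

    cap = cv2.VideoCapture("path/to/video/file.mp4")  # placeholder path
    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
    out = cv2.VideoWriter("output.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

    while True:
        ret, im0 = cap.read()
        if not ret:
            break  # end of stream
        # ... run the model on im0 and draw annotations here ...
        out.write(im0)

    cap.release()
    out.release()
    cv2.destroyAllWindows()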

@@ -45,6 +45,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
  model = YOLO("yolov8n.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  # Define region points
  region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
@@ -52,8 +53,8 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
  # Video writer
  video_writer = cv2.VideoWriter("object_counting_output.avi",
  cv2.VideoWriter_fourcc(*'mp4v'),
- int(cap.get(5)),
- (int(cap.get(3)), int(cap.get(4))))
+ fps,
+ (w, h))
  # Init Object Counter
  counter = object_counter.ObjectCounter()
@@ -87,6 +88,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
  model = YOLO("yolov8n.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  # Define line points
  line_points = [(20, 400), (1080, 400)]
@@ -94,8 +96,8 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
  # Video writer
  video_writer = cv2.VideoWriter("object_counting_output.avi",
  cv2.VideoWriter_fourcc(*'mp4v'),
- int(cap.get(5)),
- (int(cap.get(3)), int(cap.get(4))))
+ fps,
+ (w, h))
  # Init Object Counter
  counter = object_counter.ObjectCounter()
@@ -128,6 +130,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
  model = YOLO("yolov8n.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  line_points = [(20, 400), (1080, 400)] # line or region points
  classes_to_count = [0, 2] # person and car classes for count
@@ -135,8 +138,8 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
  # Video writer
  video_writer = cv2.VideoWriter("object_counting_output.avi",
  cv2.VideoWriter_fourcc(*'mp4v'),
- int(cap.get(5)),
- (int(cap.get(3)), int(cap.get(4))))
+ fps,
+ (w, h))
  # Init Object Counter
  counter = object_counter.ObjectCounter()
@@ -170,6 +173,8 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
  | Name | Type | Default | Description |
  |---------------------|-------------|----------------------------|-----------------------------------------------|
  | view_img | `bool` | `False` | Display frames with counts |
+ | view_in_counts | `bool` | `True` | Display incounts only on video frame |
+ | view_out_counts | `bool` | `True` | Display outcounts only on video frame |
  | line_thickness | `int` | `2` | Increase bounding boxes thickness |
  | reg_pts | `list` | `[(20, 400), (1260, 400)]` | Points defining the Region Area |
  | classes_names | `dict` | `model.model.names` | Dictionary of Class Names |
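A minimal sketch of how the two new flags combine with the existing arguments (model, region points, and class names taken from the table above; other arguments left at their defaults):

    from ultralytics import YOLO
    from ultralytics.solutions import object_counter

    model = YOLO("yolov8n.pt")
    counter = object_counter.ObjectCounter()
    counter.set_args(view_img=True,
                     view_in_counts=True,    # keep the "In Count" label on the frame
                     view_out_counts=False,  # hide the "OutCount" label
                     reg_pts=[(20, 400), (1260, 400)],
                     classes_names=model.model.names)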

@@ -36,12 +36,13 @@ Speed estimation is the process of calculating the rate of movement of an object
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  # Video writer
  video_writer = cv2.VideoWriter("speed_estimation.avi",
  cv2.VideoWriter_fourcc(*'mp4v'),
- int(cap.get(5)),
- (int(cap.get(3)), int(cap.get(4))))
+ fps,
+ (w, h))
  line_pts = [(0, 360), (1280, 360)]

@@ -28,11 +28,11 @@ keywords: Ultralytics, YOLOv8, Object Detection, Object Tracking, IDetection, Vi
  model = YOLO("yolov8n.pt")
  names = model.model.names
  cap = cv2.VideoCapture("path/to/video/file.mp4")
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
- out = cv2.VideoWriter('visioneye-pinpoint.avi', cv2.VideoWriter_fourcc(*'MJPG'),
- 30, (int(cap.get(3)), int(cap.get(4))))
+ out = cv2.VideoWriter('visioneye-pinpoint.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h))
- center_point = (-10, int(cap.get(4)))
+ center_point = (-10, h)
  while True:
  ret, im0 = cap.read()
@@ -69,11 +69,11 @@ keywords: Ultralytics, YOLOv8, Object Detection, Object Tracking, IDetection, Vi
  model = YOLO("yolov8n.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
- out = cv2.VideoWriter('visioneye-pinpoint.avi', cv2.VideoWriter_fourcc(*'MJPG'),
- 30, (int(cap.get(3)), int(cap.get(4))))
+ out = cv2.VideoWriter('visioneye-pinpoint.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h))
- center_point = (-10, int(cap.get(4)))
+ center_point = (-10, h)
  while True:
  ret, im0 = cap.read()

@@ -34,6 +34,7 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi
  model = YOLO("yolov8n-pose.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  gym_object = ai_gym.AIGym() # init AI GYM module
  gym_object.set_args(line_thickness=2,
@@ -63,11 +64,12 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi
  model = YOLO("yolov8n-pose.pt")
  cap = cv2.VideoCapture("path/to/video/file.mp4")
  assert cap.isOpened(), "Error reading video file"
+ w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  video_writer = cv2.VideoWriter("workouts.avi",
  cv2.VideoWriter_fourcc(*'mp4v'),
- int(cap.get(5)),
- (int(cap.get(3)), int(cap.get(4))))
+ fps,
+ (w, h))
  gym_object = ai_gym.AIGym() # init AI GYM module
  gym_object.set_args(line_thickness=2,

@@ -84,11 +84,13 @@
  "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
  "assert cap.isOpened(), \"Error reading video file\"\n",
  "\n",
+ "w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
+ "\n",
  "# Video writer\n",
- "video_writer = cv2.VideoWriter(\"heatmap_output.avi\",\n",
+ "video_writer = cv2.VideoWriter(\"/content/people walking gray.mp4\",\n",
  " cv2.VideoWriter_fourcc(*'mp4v'),\n",
- " int(cap.get(5)),\n",
- " (int(cap.get(3)), int(cap.get(4))))\n",
+ " fps,\n",
+ " (w, h))\n",
  "\n",
  "# Init heatmap\n",
  "heatmap_obj = heatmap.Heatmap()\n",

@@ -84,14 +84,16 @@
  "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
  "assert cap.isOpened(), \"Error reading video file\"\n",
  "\n",
+ "w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
+ "\n",
  "# Define line points\n",
  "line_points = [(20, 400), (1080, 400)]\n",
  "\n",
  "# Video writer\n",
  "video_writer = cv2.VideoWriter(\"object_counting_output.avi\",\n",
  " cv2.VideoWriter_fourcc(*'mp4v'),\n",
- " int(cap.get(5)),\n",
- " (int(cap.get(3)), int(cap.get(4))))\n",
+ " fps,\n",
+ " (w, h))\n",
  "\n",
  "# Init Object Counter\n",
  "counter = object_counter.ObjectCounter()\n",

@@ -85,7 +85,7 @@
  {
  "cell_type": "code",
  "source": [
- "!yolo track source=\"/content/people walking gray.mp4\" save=True"
+ "!yolo track source=\"/path/to/video/file.mp4\" save=True"
  ],
  "metadata": {
  "id": "-XJqhOwo6iqT"
@@ -124,13 +124,12 @@
  "cap = cv2.VideoCapture(video_path)\n",
  "assert cap.isOpened(), \"Error reading video file\"\n",
  "\n",
- "frame_width = int(cap.get(3))\n",
- "frame_height = int(cap.get(4))\n",
- "size = (frame_width, frame_height)\n",
- "result = cv2.VideoWriter('object_tracking.avi',\n",
- " cv2.VideoWriter_fourcc(*'MJPG'),\n",
- " int(cap.get(5)), size)\n",
+ "w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))\n",
+ "\n",
+ "result = cv2.VideoWriter(\"object_tracking.avi\",\n",
+ " cv2.VideoWriter_fourcc(*'mp4v'),\n",
+ " fps,\n",
+ " (w, h))\n",
  "\n",
  "while cap.isOpened():\n",
  " success, frame = cap.read()\n",
@@ -175,7 +174,7 @@
  "metadata": {
  "id": "Cx-u59HQdu2o"
  },
- "execution_count": 3,
+ "execution_count": null,
  "outputs": []
  },
  {

@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license
- __version__ = '8.0.236'
+ __version__ = '8.0.237'
  from ultralytics.data.explorer.explorer import Explorer
  from ultralytics.models import RTDETR, SAM, YOLO

@@ -120,7 +120,7 @@ class Heatmap:
  self.counting_region = Polygon([(20, 400), (1260, 400)]) # dummy points
  # Heatmap new frame
- self.heatmap = np.zeros((int(self.imw), int(self.imh)), dtype=np.float32)
+ self.heatmap = np.zeros((int(self.imh), int(self.imw)), dtype=np.float32)
  self.count_txt_thickness = count_txt_thickness
  self.count_txt_color = count_txt_color
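The swap matters because NumPy indexes arrays as (rows, columns), i.e. (height, width). A quick standalone check with example dimensions:

    import numpy as np

    imw, imh = 1280, 720  # width and height reported by the capture (example values)
    heatmap = np.zeros((int(imh), int(imw)), dtype=np.float32)  # rows = height, cols = width
    frame = np.zeros((imh, imw, 3), dtype=np.uint8)  # an OpenCV BGR frame has shape (h, w, 3)
    assert heatmap.shape == frame.shape[:2]  # heatmap now aligns with the frame for overlaying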

@@ -33,6 +33,8 @@ class ObjectCounter:
  self.im0 = None
  self.tf = None
  self.view_img = False
+ self.view_in_counts = True
+ self.view_out_counts = True
  self.names = None # Classes names
  self.annotator = None # Annotator
@@ -61,6 +63,8 @@ class ObjectCounter:
  line_thickness=2,
  track_thickness=2,
  view_img=False,
+ view_in_counts=True,
+ view_out_counts=True,
  draw_tracks=False,
  count_txt_thickness=2,
  count_txt_color=(0, 0, 0),
@@ -74,6 +78,8 @@ class ObjectCounter:
  Args:
  line_thickness (int): Line thickness for bounding boxes.
  view_img (bool): Flag to control whether to display the video stream.
+ view_in_counts (bool): Flag to control whether to display the incounts on video stream.
+ view_out_counts (bool): Flag to control whether to display the outcounts on video stream.
  reg_pts (list): Initial list of points defining the counting region.
  classes_names (dict): Classes names
  track_thickness (int): Track thickness
@@ -88,6 +94,8 @@ class ObjectCounter:
  """
  self.tf = line_thickness
  self.view_img = view_img
+ self.view_in_counts = view_in_counts
+ self.view_out_counts = view_out_counts
  self.track_thickness = track_thickness
  self.draw_tracks = draw_tracks
@@ -192,11 +200,23 @@ class ObjectCounter:
  incount_label = 'In Count : ' + f'{self.in_counts}'
  outcount_label = 'OutCount : ' + f'{self.out_counts}'
- self.annotator.count_labels(in_count=incount_label,
- out_count=outcount_label,
- count_txt_size=self.count_txt_thickness,
- txt_color=self.count_txt_color,
- color=self.count_color)
+ # Display counts based on user choice
+ counts_label = None
+ if not self.view_in_counts and not self.view_out_counts:
+     counts_label = None
+ elif not self.view_in_counts:
+     counts_label = outcount_label
+ elif not self.view_out_counts:
+     counts_label = incount_label
+ else:
+     counts_label = incount_label + ' ' + outcount_label
+ if counts_label is not None:
+     self.annotator.count_labels(counts=counts_label,
+     count_txt_size=self.count_txt_thickness,
+     txt_color=self.count_txt_color,
+     color=self.count_color)
  def display_frames(self):
  """Display frame."""

@@ -291,12 +291,11 @@ class Annotator:
  cv2.polylines(self.im, [points], isClosed=False, color=color, thickness=track_thickness)
  cv2.circle(self.im, (int(track[-1][0]), int(track[-1][1])), track_thickness * 2, color, -1)
- def count_labels(self, in_count=0, out_count=0, count_txt_size=2, color=(255, 255, 255), txt_color=(0, 0, 0)):
+ def count_labels(self, counts=0, count_txt_size=2, color=(255, 255, 255), txt_color=(0, 0, 0)):
  """
  Plot counts for object counter
  Args:
- in_count (int): in count value
- out_count (int): out count value
+ counts (int): objects counts value
  count_txt_size (int): text size for counts display
  color (tuple): background color of counts display
  txt_color (tuple): text color of counts display
@@ -307,37 +306,24 @@ class Annotator:
  gap = int(24 * tl) # gap between in_count and out_count based on line_thickness
  # Get text size for in_count and out_count
- t_size_in = cv2.getTextSize(str(in_count), 0, fontScale=tl / 2, thickness=tf)[0]
- t_size_out = cv2.getTextSize(str(out_count), 0, fontScale=tl / 2, thickness=tf)[0]
+ t_size_in = cv2.getTextSize(str(counts), 0, fontScale=tl / 2, thickness=tf)[0]
- # Calculate positions for in_count and out_count labels
- text_width = max(t_size_in[0], t_size_out[0])
- text_x1 = (self.im.shape[1] - text_width - 120 * self.tf) // 2 - gap
- text_x2 = (self.im.shape[1] - text_width + 120 * self.tf) // 2 + gap
- text_y = max(t_size_in[1], t_size_out[1])
+ # Calculate positions for counts label
+ text_width = t_size_in[0]
+ text_x = (self.im.shape[1] - text_width) // 2 # Center x-coordinate
+ text_y = t_size_in[1]
  # Create a rounded rectangle for in_count
- cv2.rectangle(self.im, (text_x1 - 5, text_y - 5), (text_x1 + text_width + 7, text_y + t_size_in[1] + 7), color,
+ cv2.rectangle(self.im, (text_x - 5, text_y - 5), (text_x + text_width + 7, text_y + t_size_in[1] + 7), color,
  -1)
  cv2.putText(self.im,
- str(in_count), (text_x1, text_y + t_size_in[1]),
+ str(counts), (text_x, text_y + t_size_in[1]),
  0,
  tl / 2,
  txt_color,
  self.tf,
  lineType=cv2.LINE_AA)
- # Create a rounded rectangle for out_count
- cv2.rectangle(self.im, (text_x2 - 5, text_y - 5), (text_x2 + text_width + 7, text_y + t_size_out[1] + 7), color,
- -1)
- cv2.putText(self.im,
- str(out_count), (text_x2, text_y + t_size_out[1]),
- 0,
- tl / 2,
- txt_color,
- thickness=self.tf,
- lineType=cv2.LINE_AA)
  @staticmethod
  def estimate_pose_angle(a, b, c):
  """Calculate the pose angle for object
