`ultralytics 8.1.9` replace `.size(0)` with `.shape[0]` (#7957)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
pull/12664/head^2 v8.1.9
Glenn Jocher 10 months ago committed by GitHub
parent 66b32bb4dd
commit 70d4a3752e
15 changed files (changed lines in parentheses):

  1. .github/workflows/docker.yaml (14)
  2. docker/Dockerfile (4)
  3. docker/Dockerfile-arm64 (4)
  4. docker/Dockerfile-conda (4)
  5. docker/Dockerfile-cpu (4)
  6. docker/Dockerfile-jetson (4)
  7. docker/Dockerfile-python (4)
  8. ultralytics/__init__.py (2)
  9. ultralytics/engine/trainer.py (2)
  10. ultralytics/models/sam/modules/decoders.py (2)
  11. ultralytics/models/sam/modules/tiny_encoder.py (2)
  12. ultralytics/trackers/utils/gmc.py (6)
  13. ultralytics/utils/metrics.py (2)
  14. ultralytics/utils/tal.py (2)
  15. ultralytics/utils/torch_utils.py (4)

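For readers skimming the hunks below: on a `torch.Tensor`, `.shape[0]` and `.size(0)` return the same value, but `.shape` indexing is also the NumPy convention, so the replacement keeps the code uniform across tensors and arrays. A minimal sketch of the equivalence (illustrative shapes only, assuming PyTorch and NumPy are available):

```python
import numpy as np
import torch

t = torch.zeros(8, 3, 32, 32)  # e.g. a small image batch
a = np.zeros((8, 4))           # e.g. a box array

# Equivalent on torch tensors: both return the size of dim 0
assert t.size(0) == t.shape[0] == 8

# .shape[0] also works unchanged on numpy arrays, whereas
# np.ndarray.size is the total element count, not a per-dim getter
assert a.shape[0] == 8
assert a.size == 32

# ndim checks are likewise interchangeable (relevant to the trainer.py hunk)
assert len(t.shape) == len(t.size()) == t.ndim == 4
```
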
@@ -109,10 +109,18 @@ jobs:
- name: Build Image
if: github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true'
run: |
docker build --platform ${{ matrix.platforms }} -f docker/${{ matrix.dockerfile }} \
uses: nick-invision/retry@v2
with:
timeout_minutes: 60
retry_wait_seconds: 0
max_attempts: 1
command: |
docker build \
--platform ${{ matrix.platforms }} \
-f docker/${{ matrix.dockerfile }} \
-t ultralytics/ultralytics:${{ matrix.tags }} \
-t ultralytics/ultralytics:${{ steps.get_version.outputs.version_tag }} .
-t ultralytics/ultralytics:${{ steps.get_version.outputs.version_tag }} \
.
- name: Run Tests
if: (github.event_name == 'push' || github.event.inputs[matrix.dockerfile] == 'true') && matrix.platforms == 'linux/amd64' && matrix.dockerfile != 'Dockerfile-conda' # arm64 images not supported on GitHub CI runners

@@ -7,7 +7,9 @@ FROM pytorch/pytorch:2.1.0-cuda12.1-cudnn8-runtime
RUN pip install --no-cache nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com
# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package

@@ -6,7 +6,9 @@
FROM arm64v8/ubuntu:22.04
# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package

@@ -6,7 +6,9 @@
FROM continuumio/miniconda3:latest
# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
RUN apt update \

@@ -6,7 +6,9 @@
FROM ubuntu:23.10
# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package

@@ -6,7 +6,9 @@
FROM nvcr.io/nvidia/l4t-pytorch:r35.2.1-pth2.0-py3
# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package

@@ -6,7 +6,9 @@
FROM python:3.10-slim-bookworm
# Downloads to user config dir
ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/
ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.Unicode.ttf \
/root/.config/Ultralytics/
# Install linux packages
# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package

@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = "8.1.8"
__version__ = "8.1.9"
from ultralytics.data.explorer.explorer import Explorer
from ultralytics.models import RTDETR, SAM, YOLO

@@ -400,7 +400,7 @@ class BaseTrainer:
# Log
mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB)
loss_len = self.tloss.shape[0] if len(self.tloss.size()) else 1
loss_len = self.tloss.shape[0] if len(self.tloss.shape) else 1
losses = self.tloss if loss_len > 1 else torch.unsqueeze(self.tloss, 0)
if RANK in (-1, 0):
pbar.set_description(

@@ -121,7 +121,7 @@ class MaskDecoder(nn.Module):
"""
# Concatenate output tokens
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.shape[0], -1, -1)
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
# Expand per-image data in batch direction to be per-mask

@@ -732,7 +732,7 @@ class TinyViT(nn.Module):
for i in range(start_i, len(self.layers)):
layer = self.layers[i]
x = layer(x)
B, _, C = x.size()
B, _, C = x.shape
x = x.view(B, 64, 64, C)
x = x.permute(0, 3, 1, 2)
return self.neck(x)

@@ -258,7 +258,7 @@ class GMC:
# import matplotlib.pyplot as plt
# matches_img = np.hstack((self.prevFrame, frame))
# matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR)
# W = np.size(self.prevFrame, 1)
# W = self.prevFrame.shape[1]
# for m in goodMatches:
# prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_)
# curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_)
@@ -275,7 +275,7 @@ class GMC:
# plt.show()
# Find rigid matrix
if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)):
if (prevPoints.shape[0] > 4) and (prevPoints.shape[0] == prevPoints.shape[0]):
H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
# Handle downscale
@@ -343,7 +343,7 @@ class GMC:
currPoints = np.array(currPoints)
# Find rigid matrix
if np.size(prevPoints, 0) > 4 and np.size(prevPoints, 0) == np.size(prevPoints, 0):
if (prevPoints.shape[0] > 4) and (prevPoints.shape[0] == prevPoints.shape[0]):
H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
if self.downscale > 1.0:

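The `gmc.py` hunks above swap `np.size(arr, axis)` for `arr.shape[axis]`, which is behavior-preserving for the 2-D keypoint arrays used here; a quick check with a made-up array (not taken from the tracker itself):

```python
import numpy as np

prevPoints = np.random.rand(12, 2)  # hypothetical matched keypoints, shape (N, 2)

# np.size(arr, axis) and arr.shape[axis] agree for any valid axis
assert np.size(prevPoints, 0) == prevPoints.shape[0] == 12
```
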
@@ -331,7 +331,7 @@ class ConfusionMatrix:
gt_bboxes (Array[M, 4]): Ground truth bounding boxes with xyxy format.
gt_cls (Array[M]): The class labels.
"""
if gt_cls.size(0) == 0: # Check if labels is empty
if gt_cls.shape[0] == 0: # Check if labels is empty
if detections is not None:
detections = detections[detections[:, 4] > self.conf]
detection_classes = detections[:, 5].int()

@@ -56,8 +56,8 @@ class TaskAlignedAssigner(nn.Module):
fg_mask (Tensor): shape(bs, num_total_anchors)
target_gt_idx (Tensor): shape(bs, num_total_anchors)
"""
self.bs = pd_scores.size(0)
self.n_max_boxes = gt_bboxes.size(1)
self.bs = pd_scores.shape[0]
self.n_max_boxes = gt_bboxes.shape[1]
if self.n_max_boxes == 0:
device = gt_bboxes.device

@@ -190,7 +190,7 @@ def fuse_conv_and_bn(conv, bn):
fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
# Prepare spatial bias
b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
b_conv = torch.zeros(conv.weight.shape[0], device=conv.weight.device) if conv.bias is None else conv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
@@ -221,7 +221,7 @@ def fuse_deconv_and_bn(deconv, bn):
fuseddconv.weight.copy_(torch.mm(w_bn, w_deconv).view(fuseddconv.weight.shape))
# Prepare spatial bias
b_conv = torch.zeros(deconv.weight.size(1), device=deconv.weight.device) if deconv.bias is None else deconv.bias
b_conv = torch.zeros(deconv.weight.shape[1], device=deconv.weight.device) if deconv.bias is None else deconv.bias
b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
fuseddconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

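A side note on the two `torch_utils.py` hunks above: the zero-bias placeholder indexes dim 0 for `nn.Conv2d` but dim 1 for `nn.ConvTranspose2d`, because the two layers place the output-channel axis differently in their weight tensors. A standalone check (not part of the commit):

```python
import torch.nn as nn

conv = nn.Conv2d(16, 32, kernel_size=3)
deconv = nn.ConvTranspose2d(16, 32, kernel_size=3)

# Conv2d weights are (out_channels, in_channels, kH, kW)
assert conv.weight.shape[0] == 32
# ConvTranspose2d weights are (in_channels, out_channels / groups, kH, kW)
assert deconv.weight.shape[1] == 32
```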