diff --git a/infer-seg-without-torch.py b/infer-seg-without-torch.py
index 98c9cd4..f1923e3 100644
--- a/infer-seg-without-torch.py
+++ b/infer-seg-without-torch.py
@@ -4,7 +4,7 @@ from pathlib import Path
 import cv2
 import numpy as np
 
-from config import CLASSES, COLORS
+from config import ALPHA, CLASSES, COLORS, MASK_COLORS
 from models.utils import blob, letterbox, path_to_list, seg_postprocess
 
 
@@ -41,9 +41,12 @@ def main(args: argparse.Namespace) -> None:
         seg_img = seg_img[dh:H - dh, dw:W - dw, [2, 1, 0]]
         bboxes, scores, labels, masks = seg_postprocess(
             data, bgr.shape[:2], args.conf_thres, args.iou_thres)
-        mask, mask_color = [m[:, dh:H - dh, dw:W - dw, :] for m in masks]
-        inv_alph_masks = (1 - mask * 0.5).cumprod(0)
-        mcs = (mask_color * inv_alph_masks).sum(0) * 2
+        masks = masks[:, dh:H - dh, dw:W - dw, :]
+        mask_colors = MASK_COLORS[labels % len(MASK_COLORS)]
+        mask_colors = mask_colors.reshape(-1, 1, 1, 3) * ALPHA
+        mask_colors = masks @ mask_colors
+        inv_alph_masks = (1 - masks * 0.5).cumprod(0)
+        mcs = (mask_colors * inv_alph_masks).sum(0) * 2
         seg_img = (seg_img * inv_alph_masks[-1] + mcs) * 255
         draw = cv2.resize(seg_img.astype(np.uint8), draw.shape[:2][::-1])
 
diff --git a/infer-seg.py b/infer-seg.py
index 7254bbd..46e3251 100644
--- a/infer-seg.py
+++ b/infer-seg.py
@@ -6,7 +6,7 @@ import cv2
 import numpy as np
 import torch
 
-from config import CLASSES, COLORS
+from config import ALPHA, CLASSES, COLORS, MASK_COLORS
 from models.torch_utils import seg_postprocess
 from models.utils import blob, letterbox, path_to_list
 
@@ -37,13 +37,18 @@ def main(args: argparse.Namespace) -> None:
         tensor = torch.asarray(tensor, device=device)
         # inference
         data = Engine(tensor)
 
+        seg_img = torch.asarray(seg_img[dh:H - dh, dw:W - dw, [2, 1, 0]], device=device)
         bboxes, scores, labels, masks = seg_postprocess(
             data, bgr.shape[:2], args.conf_thres, args.iou_thres)
-        mask, mask_color = [m[:, dh:H - dh, dw:W - dw, :] for m in masks]
-        inv_alph_masks = (1 - mask * 0.5).cumprod(0)
-        mcs = (mask_color * inv_alph_masks).sum(0) * 2
+        masks = masks[:, dh:H - dh, dw:W - dw, :]
+        indices = (labels % len(MASK_COLORS)).long()
+        mask_colors = torch.asarray(MASK_COLORS, device=device)[indices]
+        mask_colors = mask_colors.view(-1, 1, 1, 3) * ALPHA
+        mask_colors = masks @ mask_colors
+        inv_alph_masks = (1 - masks * 0.5).cumprod(0)
+        mcs = (mask_colors * inv_alph_masks).sum(0) * 2
         seg_img = (seg_img * inv_alph_masks[-1] + mcs) * 255
         draw = cv2.resize(seg_img.cpu().numpy().astype(np.uint8),
                           draw.shape[:2][::-1])
 
diff --git a/models/engine.py b/models/engine.py
index 0285bdd..5e33a44 100644
--- a/models/engine.py
+++ b/models/engine.py
@@ -303,7 +303,7 @@ class TRTModule(torch.nn.Module):
 
         for i in range(self.num_outputs):
             j = i + self.num_inputs
-            if self.is_dynamic:
+            if self.odynamic:
                 shape = tuple(self.context.get_binding_shape(j))
                 output = torch.empty(size=shape,
                                      dtype=self.out_info[i].dtype,
diff --git a/models/utils.py b/models/utils.py
index 755faf9..a1d8063 100644
--- a/models/utils.py
+++ b/models/utils.py
@@ -121,5 +121,5 @@ def seg_postprocess(
     masks = cv2.resize(masks.transpose([1, 2, 0]),
                        shape,
                        interpolation=cv2.INTER_LINEAR).transpose(2, 0, 1)
-    masks = np.ascontiguousarray((masks > 0.5)[..., None])
+    masks = np.ascontiguousarray((masks > 0.5)[..., None], dtype=np.float32)
     return bboxes, scores, labels, masks
diff --git a/requirements.txt b/requirements.txt
index 9f69929..5e862dc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-numpy
+numpy<=1.23.5
 onnx
 onnxsim
 opencv-python