Fix some typos (#4858)

* Fix some typos

Signed-off-by: lizz <lizz@sensetime.com>

* More

Signed-off-by: lizz <lizz@sensetime.com>

* More

Signed-off-by: lizz <lizz@sensetime.com>

* More

Signed-off-by: lizz <lizz@sensetime.com>
lizz committed by GitHub
parent ecb4a07c17
commit a5ee9b413b
44 changed files (changed line counts in parentheses):

1. docs/changelog.md (2)
2. docs/tutorials/config.md (2)
3. mmdet/core/bbox/assigners/atss_assigner.py (2)
4. mmdet/core/bbox/assigners/base_assigner.py (3)
5. mmdet/core/bbox/assigners/hungarian_assigner.py (2)
6. mmdet/core/bbox/assigners/max_iou_assigner.py (2)
7. mmdet/core/bbox/coder/base_bbox_coder.py (2)
8. mmdet/core/bbox/coder/bucketing_bbox_coder.py (4)
9. mmdet/core/bbox/iou_calculators/iou2d_calculator.py (2)
10. mmdet/core/bbox/match_costs/match_cost.py (4)
11. mmdet/core/evaluation/class_names.py (2)
12. mmdet/core/evaluation/mean_ap.py (2)
13. mmdet/core/mask/structures.py (13)
14. mmdet/datasets/custom.py (1)
15. mmdet/datasets/pipelines/auto_augment.py (2)
16. mmdet/datasets/pipelines/transforms.py (2)
17. mmdet/models/backbones/detectors_resnet.py (2)
18. mmdet/models/backbones/hrnet.py (2)
19. mmdet/models/backbones/regnet.py (4)
20. mmdet/models/dense_heads/anchor_free_head.py (4)
21. mmdet/models/dense_heads/atss_head.py (2)
22. mmdet/models/dense_heads/cascade_rpn_head.py (6)
23. mmdet/models/dense_heads/corner_head.py (2)
24. mmdet/models/dense_heads/fcos_head.py (10)
25. mmdet/models/dense_heads/fsaf_head.py (2)
26. mmdet/models/dense_heads/gfl_head.py (2)
27. mmdet/models/dense_heads/paa_head.py (8)
28. mmdet/models/dense_heads/reppoints_head.py (2)
29. mmdet/models/dense_heads/transformer_head.py (4)
30. mmdet/models/detectors/sparse_rcnn.py (2)
31. mmdet/models/detectors/yolact.py (2)
32. mmdet/models/losses/gaussian_focal_loss.py (2)
33. mmdet/models/necks/bfp.py (2)
34. mmdet/models/necks/fpn_carafe.py (2)
35. mmdet/models/necks/hrfpn.py (2)
36. mmdet/models/roi_heads/base_roi_head.py (3)
37. mmdet/models/roi_heads/bbox_heads/sabl_head.py (6)
38. mmdet/models/roi_heads/mask_heads/fcn_mask_head.py (2)
39. mmdet/models/roi_heads/mask_heads/mask_point_head.py (2)
40. mmdet/models/roi_heads/sparse_roi_head.py (4)
41. mmdet/models/utils/transformer.py (12)
42. tools/analysis_tools/robustness_eval.py (4)
43. tools/analysis_tools/test_robustness.py (2)
44. tools/dataset_converters/cityscapes.py (2)

@@ -393,7 +393,7 @@ Function `get_subset_by_classes` in dataset is refactored and only filters out i
- Implement FCOS training tricks (#2935)
- Use BaseDenseHead as base class for anchor-base heads (#2963)
- Add `with_cp` for BasicBlock (#2891)
-- Add `stem_channles` argument for ResNet (#2954)
+- Add `stem_channels` argument for ResNet (#2954)

**Improvements**

@@ -290,7 +290,7 @@ test_pipeline = [
flip=False, # Whether to flip images during testing
transforms=[
dict(type='Resize', # Use resize augmentation
-keep_ratio=True), # Whether to keep the ratio between height and width, the img_scale set here will be supressed by the img_scale set above.
+keep_ratio=True), # Whether to keep the ratio between height and width, the img_scale set here will be suppressed by the img_scale set above.
dict(type='RandomFlip'), # Thought RandomFlip is added in pipeline, it is not used because flip=False
dict(
type='Normalize', # Normalization config, the values are from img_norm_cfg

@@ -48,7 +48,7 @@ class ATSSAssigner(BaseAssigner):
4. get corresponding iou for the these candidates, and compute the
mean and std, set mean + std as the iou threshold
5. select these candidates whose iou are greater than or equal to
-the threshold as postive
+the threshold as positive
6. limit the positive sample's center in gt
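Steps 4 and 5 above amount to an adaptive, per-gt IoU threshold. A minimal sketch of that thresholding, with illustrative names rather than the mmdet API:

```python
import torch

def select_positives(candidate_ious):
    # Steps 4-5: threshold = mean + std of the candidates' IoUs with one
    # gt box; keep candidates at or above the threshold as positives.
    iou_thr = candidate_ious.mean() + candidate_ious.std()
    return candidate_ious >= iou_thr

# Only candidates well above the average IoU survive.
mask = select_positives(torch.tensor([0.2, 0.3, 0.4, 0.8, 0.9]))
```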

@@ -6,5 +6,4 @@ class BaseAssigner(metaclass=ABCMeta):
@abstractmethod
def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
-"""Assign boxes to either a ground truth boxe or a negative boxes."""
-pass
+"""Assign boxes to either a ground truth boxes or a negative boxes."""

@@ -18,7 +18,7 @@ class HungarianAssigner(BaseAssigner):
This class computes an assignment between the targets and the predictions
based on the costs. The costs are weighted sum of three components:
-classfication cost, regression L1 cost and regression iou cost. The
+classification cost, regression L1 cost and regression iou cost. The
targets don't include the no_object, so generally there are more
predictions than targets. After the one-to-one matching, the un-matched
are treated as backgrounds. Thus each query prediction will be assigned
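The one-to-one matching described here reduces to a linear sum assignment over the weighted cost matrix, which mmdet delegates to scipy. A sketch with illustrative weights (the 1.0/5.0/2.0 values are assumed, not taken from this diff):

```python
import torch
from scipy.optimize import linear_sum_assignment

num_queries, num_gts = 5, 2
# Hypothetical per-pair costs: rows are query predictions, columns are gts.
cls_cost = torch.rand(num_queries, num_gts)
reg_cost = torch.rand(num_queries, num_gts)
iou_cost = torch.rand(num_queries, num_gts)
cost = 1.0 * cls_cost + 5.0 * reg_cost + 2.0 * iou_cost

# One-to-one matching; queries left unmatched become background.
matched_rows, matched_cols = linear_sum_assignment(cost.numpy())
```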

@@ -182,7 +182,7 @@ class MaxIoUAssigner(BaseAssigner):
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
if self.match_low_quality:
-# Low-quality matching will overwirte the assigned_gt_inds assigned
+# Low-quality matching will overwrite the assigned_gt_inds assigned
# in Step 3. Thus, the assigned gt might not be the best one for
# prediction.
# For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
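A toy reconstruction of the overwrite this comment describes, using the docstring's own bbox A example (illustrative code, not the mmdet internals):

```python
import torch

overlaps = torch.tensor([[0.9, 0.8],   # bbox A vs GT 1 and GT 2
                         [0.3, 0.2]])  # bbox B
# Step 3: each bbox takes its best-overlapping gt (1-based labels).
assigned = overlaps.max(dim=1).indices + 1        # A -> GT 1, B -> GT 1
# Low-quality matching: each gt claims its best bbox, overwriting step 3,
# so GT 2 steals bbox A even though A overlaps GT 1 more.
gt_argmax = overlaps.max(dim=0).indices
for gt_idx in range(overlaps.size(1)):
    assigned[gt_argmax[gt_idx]] = gt_idx + 1
print(assigned)  # tensor([2, 1])
```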

@@ -10,10 +10,8 @@ class BaseBBoxCoder(metaclass=ABCMeta):
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
-pass
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
-pass

@@ -10,7 +10,7 @@ from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class BucketingBBoxCoder(BaseBBoxCoder):
-"""Bucketing BBox Coder for Side-Aware Bounday Localization (SABL).
+"""Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).
Boundary Localization with Bucketing and Bucketing Guided Rescoring
are implemented here.
@@ -255,7 +255,7 @@ def bbox2bucket(proposals,
bucket_cls_d_weights
],
dim=-1)
-# ignore second nearest buckets for cls if necessay
+# ignore second nearest buckets for cls if necessary
if cls_ignore_neighbor:
bucket_cls_weights = (~((bucket_cls_weights == 1) &
(bucket_labels == 0))).float()

@@ -88,7 +88,7 @@ def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
-# Either the boxes are empty or the length of boxes's last dimenstion is 4
+# Either the boxes are empty or the length of boxes' last dimension is 4
assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
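For context, the 'iou' mode guarded by these asserts is plain intersection-over-union on [x1, y1, x2, y2] boxes. A minimal aligned-pairs sketch (the real function also broadcasts and supports 'iof'/'giou'):

```python
import torch

def iou_aligned(b1, b2, eps=1e-6):
    # b1, b2: (N, 4) boxes in [x1, y1, x2, y2]; returns IoU of matching rows.
    lt = torch.max(b1[..., :2], b2[..., :2])   # intersection top-left
    rb = torch.min(b1[..., 2:], b2[..., 2:])   # intersection bottom-right
    wh = (rb - lt).clamp(min=0)                # zero if boxes don't overlap
    inter = wh[..., 0] * wh[..., 1]
    area1 = (b1[..., 2] - b1[..., 0]) * (b1[..., 3] - b1[..., 1])
    area2 = (b2[..., 2] - b2[..., 0]) * (b2[..., 3] - b2[..., 1])
    return inter / (area1 + area2 - inter).clamp(min=eps)
```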

@@ -136,7 +136,7 @@ class ClassificationCost(object):
# Following the official DETR repo, contrary to the loss that
# NLL is used, we approximate it in 1 - cls_score[gt_label].
# The 1 is a constant that doesn't change the matching,
-# so it can be ommitted.
+# so it can be omitted.
cls_score = cls_pred.softmax(-1)
cls_cost = -cls_score[:, gt_labels]
return cls_cost * self.weight
@@ -179,6 +179,6 @@ class IoUCost(object):
# overlaps: [num_bboxes, num_gt]
overlaps = bbox_overlaps(
bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)
-# The 1 is a constant that doesn't change the matching, so ommitted.
+# The 1 is a constant that doesn't change the matching, so omitted.
iou_cost = -overlaps
return iou_cost * self.weight

@@ -94,7 +94,7 @@ dataset_aliases = {
'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
'coco': ['coco', 'mscoco', 'ms_coco'],
-'wider_face': ['WIDERFaceDataset', 'wider_face', 'WDIERFace'],
+'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],
'cityscapes': ['cityscapes']
}

@@ -124,7 +124,7 @@ def tpfp_imagenet(det_bboxes,
# find best overlapped available gt
for j in range(num_gts):
# different from PASCAL VOC: allow finding other gts if the
-# best overlaped ones are already matched by other det bboxes
+# best overlapped ones are already matched by other det bboxes
if gt_covered[j]:
continue
elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:

@@ -23,7 +23,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
BaseInstanceMasks: The rescaled masks.
"""
-pass
@abstractmethod
def resize(self, out_shape, interpolation='nearest'):
@@ -36,7 +35,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
BaseInstanceMasks: The resized masks.
"""
-pass
@abstractmethod
def flip(self, flip_direction='horizontal'):
@@ -48,7 +46,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
BaseInstanceMasks: The flipped masks.
"""
-pass
@abstractmethod
def pad(self, out_shape, pad_val):
@@ -61,7 +58,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
BaseInstanceMasks: The padded masks.
"""
-pass
@abstractmethod
def crop(self, bbox):
@@ -73,7 +69,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Return:
BaseInstanceMasks: The cropped masks.
"""
-pass
@abstractmethod
def crop_and_resize(self,
@@ -99,18 +94,15 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Return:
BaseInstanceMasks: the cropped and resized masks.
"""
-pass
@abstractmethod
def expand(self, expanded_h, expanded_w, top, left):
"""see :class:`Expand`."""
-pass
@property
@abstractmethod
def areas(self):
"""ndarray: areas of each instance."""
-pass
@abstractmethod
def to_ndarray(self):
@@ -119,7 +111,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Return:
ndarray: Converted masks in the format of ndarray.
"""
-pass
@abstractmethod
def to_tensor(self, dtype, device):
@@ -132,7 +123,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
Tensor: Converted masks in the format of Tensor.
"""
-pass
@abstractmethod
def translate(self,
@@ -154,7 +144,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
Translated masks.
"""
-pass
def shear(self,
out_shape,
@@ -176,7 +165,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
ndarray: Sheared masks.
"""
-pass
@abstractmethod
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
@@ -195,7 +183,6 @@ class BaseInstanceMasks(metaclass=ABCMeta):
Returns:
Rotated masks.
"""
-pass
class BitmapMasks(BaseInstanceMasks):

@@ -262,7 +262,6 @@ class CustomDataset(Dataset):
def format_results(self, results, **kwargs):
"""Place holder to format result to dataset specific output."""
-pass
def evaluate(self,
results,

@@ -634,7 +634,7 @@ class Translate(object):
min_y = np.maximum(0, min_y + offset)
max_y = np.minimum(h, max_y + offset)
-# the boxs translated outside of image will be filtered along with
+# the boxes translated outside of image will be filtered along with
# the corresponding masks, by invoking ``_filter_invalid``.
results[key] = np.concatenate([min_x, min_y, max_x, max_y],
axis=-1)

@@ -120,7 +120,7 @@ class Resize(object):
Args:
img_scales (list[tuple]): Images scale range for sampling.
There must be two tuples in img_scales, which specify the lower
-and uper bound of image scales.
+and upper bound of image scales.
Returns:
(tuple, None): Returns a tuple ``(img_scale, None)``, where \
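The lower/upper-bound wording corresponds to sampling the long and short image edges independently between the two tuples. A sketch mirroring the docstring (illustrative, not the mmdet implementation):

```python
import numpy as np

def random_sample_scale(img_scales):
    assert len(img_scales) == 2
    long_edges = sorted(max(s) for s in img_scales)
    short_edges = sorted(min(s) for s in img_scales)
    # Sample each edge uniformly between its lower and upper bound.
    long_edge = np.random.randint(long_edges[0], long_edges[1] + 1)
    short_edge = np.random.randint(short_edges[0], short_edges[1] + 1)
    return (long_edge, short_edge), None  # the (img_scale, None) tuple above

scale, _ = random_sample_scale([(1333, 640), (1333, 800)])
```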

@@ -149,7 +149,7 @@ class ResLayer(nn.Sequential):
rfp_inplanes=None,
**kwargs):
self.block = block
-assert downsample_first, f'downsampel_first={downsample_first} is ' \
+assert downsample_first, f'downsample_first={downsample_first} is ' \
'not supported in DetectoRS'
downsample = None

@@ -527,7 +527,7 @@ class HRNet(nn.Module):
return y_list
def train(self, mode=True):
-"""Convert the model into training mode whill keeping the normalization
+"""Convert the model into training mode will keeping the normalization
layer freezed."""
super(HRNet, self).train(mode)
if mode and self.norm_eval:

@@ -21,7 +21,7 @@ class RegNet(ResNet):
- wm (float): quantization parameter to quantize the width
- depth (int): depth of the backbone
- group_w (int): width of group
-- bot_mul (float): bottleneck ratio, i.e. expansion of bottlneck.
+- bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
strides (Sequence[int]): Strides of the first block of each stage.
base_channels (int): Base channels after stem layer.
in_channels (int): Number of input image channels. Default: 3.
@@ -252,7 +252,7 @@ class RegNet(ResNet):
@staticmethod
def quantize_float(number, divisor):
-"""Converts a float to closest non-zero int divisible by divior.
+"""Converts a float to closest non-zero int divisible by divisor.
Args:
number (int): Original number to be quantized.
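The corrected docstring describes rounding to the nearest multiple of the divisor. A plausible sketch (the non-zero clamp is an assumption drawn from the wording, not from this diff):

```python
def quantize_float(number, divisor):
    # Nearest integer multiple of `divisor`, clamped so it never hits zero.
    return max(int(round(number / divisor) * divisor), divisor)

print(quantize_float(100.3, 8))  # 104
```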

@@ -197,7 +197,7 @@ class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
return multi_apply(self.forward_single, feats)[:2]
def forward_single(self, x):
-"""Forward features of a single scale levle.
+"""Forward features of a single scale level.
Args:
x (Tensor): FPN feature maps of the specified stride.
@@ -274,7 +274,7 @@ class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
@abstractmethod
def get_targets(self, points, gt_bboxes_list, gt_labels_list):
-"""Compute regression, classification and centerss targets for points
+"""Compute regression, classification and centerness targets for points
in multiple images.
Args:

@@ -612,7 +612,7 @@ class ATSSHead(AnchorHead):
image with shape (N, 4).
bbox_weights (Tensor): BBox weights of all anchors in the
image with shape (N, 4)
-pos_inds (Tensor): Indices of postive anchor with shape
+pos_inds (Tensor): Indices of positive anchor with shape
(num_pos,).
neg_inds (Tensor): Indices of negative anchor with shape
(num_neg,).

@@ -53,7 +53,7 @@ class AdaptiveConv(nn.Module):
assert kernel_size == 3, 'Adaptive conv only supports kernels 3'
if self.adapt_type == 'offset':
assert stride == 1 and padding == 1 and groups == 1, \
-'Addptive conv offset mode only supports padding: {1}, ' \
+'Adaptive conv offset mode only supports padding: {1}, ' \
f'stride: {1}, groups: {1}'
self.conv = DeformConv2d(
in_channels,
@@ -99,7 +99,7 @@ class StageCascadeRPNHead(RPNHead):
in_channels (int): Number of channels in the input feature map.
anchor_generator (dict): anchor generator config.
adapt_cfg (dict): adaptation config.
-bridged_feature (bool, optional): wheater update rpn feature.
+bridged_feature (bool, optional): whether update rpn feature.
Default: False.
with_cls (bool, optional): wheather use classification branch.
Default: True.
@@ -471,7 +471,7 @@ class StageCascadeRPNHead(RPNHead):
num_total_samples = num_total_pos + num_total_neg
else:
# 200 is hard-coded average factor,
-# which follows guilded anchoring.
+# which follows guided anchoring.
num_total_samples = sum([label.numel()
for label in labels_list]) / 200.0

@@ -805,7 +805,7 @@ class CornerHead(BaseDenseHead):
return feat
def _local_maximum(self, heat, kernel=3):
-"""Extract local maximum pixel with given kernal.
+"""Extract local maximum pixel with given kernel.
Args:
heat (Tensor): Target heatmap.

@@ -16,7 +16,7 @@ class FCOSHead(AnchorFreeHead):
"""Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_.
The FCOS head does not use anchor boxes. Instead bounding boxes are
-predicted at each pixel and a centerness measure is used to supress
+predicted at each pixel and a centerness measure is used to suppress
low-quality predictions.
Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training
tricks used in official repo, which will bring remarkable mAP gains
@@ -116,14 +116,14 @@ class FCOSHead(AnchorFreeHead):
bbox_preds (list[Tensor]): Box energies / deltas for each \
scale level, each is a 4D-tensor, the channel number is \
num_points * 4.
-centernesses (list[Tensor]): Centerss for each scale level, \
+centernesses (list[Tensor]): centerness for each scale level, \
each is a 4D-tensor, the channel number is num_points * 1.
"""
return multi_apply(self.forward_single, feats, self.scales,
self.strides)
def forward_single(self, x, scale, stride):
-"""Forward features of a single scale levle.
+"""Forward features of a single scale level.
Args:
x (Tensor): FPN feature maps of the specified stride.
@@ -171,7 +171,7 @@ class FCOSHead(AnchorFreeHead):
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_points * 4.
-centernesses (list[Tensor]): Centerss for each scale level, each
+centernesses (list[Tensor]): centerness for each scale level, each
is a 4D-tensor, the channel number is num_points * 1.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
@@ -466,7 +466,7 @@ class FCOSHead(AnchorFreeHead):
return points
def get_targets(self, points, gt_bboxes_list, gt_labels_list):
-"""Compute regression, classification and centerss targets for points
+"""Compute regression, classification and centerness targets for points
in multiple images.
Args:
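The centerness measure mentioned in the first hunk is, per the FCOS paper, the square root of the product of the left/right and top/bottom balance of a point inside its box. A sketch (the (l, t, r, b) column order is an assumption):

```python
import torch

def centerness_target(ltrb):
    # ltrb: (N, 4) distances from each point to the left/top/right/bottom
    # box sides. Centered points score near 1, edge points near 0.
    left_right = ltrb[:, [0, 2]]
    top_bottom = ltrb[:, [1, 3]]
    ratio = (left_right.min(dim=-1).values / left_right.max(dim=-1).values) * \
            (top_bottom.min(dim=-1).values / top_bottom.max(dim=-1).values)
    return ratio.sqrt()
```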

@@ -394,7 +394,7 @@ class FSAFHead(RetinaHead):
- cls_loss: Reduced corrected classification loss. Scalar.
- reg_loss: Reduced corrected regression loss. Scalar.
- pos_flags (Tensor): Corrected bool tensor indicating the
-final postive anchors. Shape: (num_anchors, ).
+final positive anchors. Shape: (num_anchors, ).
"""
loc_weight = torch.ones_like(reg_loss)
cls_weight = torch.ones_like(cls_loss)

@@ -575,7 +575,7 @@ class GFLHead(AnchorHead):
image with shape (N, 4).
bbox_weights (Tensor): BBox weights of all anchors in the
image with shape (N, 4).
-pos_inds (Tensor): Indices of postive anchor with shape
+pos_inds (Tensor): Indices of positive anchor with shape
(num_pos,).
neg_inds (Tensor): Indices of negative anchor with shape
(num_neg,).

@@ -385,7 +385,7 @@ class PAAHead(ATSSHead):
- pos_inds_temp (Tensor): Indices of positive samples.
- ignore_inds_temp (Tensor): Indices of ignore samples.
"""
-# The implementation is (c) in Fig.3 in origin paper intead of (b).
+# The implementation is (c) in Fig.3 in origin paper instead of (b).
# You can refer to issues such as
# https://github.com/kkhoot/PAA/issues/8 and
# https://github.com/kkhoot/PAA/issues/9.
@@ -632,9 +632,9 @@ class PAAHead(ATSSHead):
after voting, with shape (num_anchors,).
"""
candidate_mask = mlvl_nms_scores > score_thr
-candidate_mask_nozeros = candidate_mask.nonzero()
-candidate_inds = candidate_mask_nozeros[:, 0]
-candidate_labels = candidate_mask_nozeros[:, 1]
+candidate_mask_nonzeros = candidate_mask.nonzero()
+candidate_inds = candidate_mask_nonzeros[:, 0]
+candidate_labels = candidate_mask_nonzeros[:, 1]
candidate_bboxes = mlvl_bboxes[candidate_inds]
candidate_scores = mlvl_nms_scores[candidate_mask]
det_bboxes_voted = []

@@ -167,7 +167,7 @@ class RepPointsHead(AnchorFreeHead):
:param pts: the input points sets (fields), each points
set (fields) is represented as 2n scalar.
-:param y_first: if y_fisrt=True, the point set is represented as
+:param y_first: if y_first=True, the point set is represented as
[y1, x1, y2, x2 ... yn, xn], otherwise the point set is
represented as [x1, y1, x2, y2 ... xn, yn].
:return: each points set is converting to a bbox [x1, y1, x2, y2].

@@ -582,7 +582,7 @@ class TransformerHead(AnchorFreeHead):
[nb_dec, bs, num_query, 4].
img_metas (list[dict]): Meta information of each image.
rescale (bool, optional): If True, return boxes in original
-image space. Defalut False.
+image space. Default False.
Returns:
list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \
@@ -593,7 +593,7 @@ class TransformerHead(AnchorFreeHead):
the corresponding box.
"""
# NOTE defaultly only using outputs from the last feature level,
-# and only the ouputs from the last decoder layer is used.
+# and only the outputs from the last decoder layer is used.
cls_scores = all_cls_scores_list[-1][-1]
bbox_preds = all_bbox_preds_list[-1][-1]

@@ -46,7 +46,7 @@ class SparseRCNN(TwoStageDetector):
assert proposals is None, 'Sparse R-CNN does not support' \
' external proposals'
-assert gt_masks is None, 'Sparse R-CNN does not instance segmenntaion'
+assert gt_masks is None, 'Sparse R-CNN does not instance segmentation'
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \

@@ -25,7 +25,7 @@ class YOLACT(SingleStageDetector):
self.init_segm_mask_weights()
def init_segm_mask_weights(self):
-"""Initialize weights of the YOLACT semg head and YOLACT mask head."""
+"""Initialize weights of the YOLACT segm head and YOLACT mask head."""
self.segm_head.init_weights()
self.mask_head.init_weights()

@@ -41,7 +41,7 @@ class GaussianFocalLoss(nn.Module):
Args:
alpha (float): Power of prediction.
-gamma (float): Power of target for negtive samples.
+gamma (float): Power of target for negative samples.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
"""

@@ -8,7 +8,7 @@ from ..builder import NECKS
@NECKS.register_module()
class BFP(nn.Module):
-"""BFP (Balanced Feature Pyrmamids)
+"""BFP (Balanced Feature Pyramids)
BFP takes multi-level features as inputs and gather them into a single one,
then refine the gathered feature and scatter the refined results to

@@ -10,7 +10,7 @@ class FPN_CARAFE(nn.Module):
"""FPN_CARAFE is a more flexible implementation of FPN. It allows more
choice for upsample methods during the top-down pathway.
-It can reproduce the preformance of ICCV 2019 paper
+It can reproduce the performance of ICCV 2019 paper
CARAFE: Content-Aware ReAssembly of FEatures
Please refer to https://arxiv.org/abs/1905.02188 for more details.

@@ -9,7 +9,7 @@ from ..builder import NECKS
@NECKS.register_module()
class HRFPN(nn.Module):
-"""HRFPN (High Resolution Feature Pyrmamids)
+"""HRFPN (High Resolution Feature Pyramids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.

@@ -81,7 +81,6 @@ class BaseRoIHead(nn.Module, metaclass=ABCMeta):
gt_masks=None,
**kwargs):
"""Forward function during training."""
-pass
async def async_simple_test(self, x, img_meta, **kwargs):
"""Asynchronized test function."""
@@ -95,7 +94,6 @@ class BaseRoIHead(nn.Module, metaclass=ABCMeta):
rescale=False,
**kwargs):
"""Test without augmentation."""
-pass
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test with augmentations.
@@ -103,4 +101,3 @@ class BaseRoIHead(nn.Module, metaclass=ABCMeta):
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
-pass

@@ -244,13 +244,13 @@ class SABLHead(nn.Module):
reg_fy = torch.transpose(reg_fy, 1, 2)
return reg_fx.contiguous(), reg_fy.contiguous()
-def reg_pred(self, x, offfset_fcs, cls_fcs):
-"""Predict bucketing esimation (cls_pred) and fine regression (offset
+def reg_pred(self, x, offset_fcs, cls_fcs):
+"""Predict bucketing estimation (cls_pred) and fine regression (offset
pred) with side-aware features."""
x_offset = x.view(-1, self.reg_in_channels)
x_cls = x.view(-1, self.reg_in_channels)
-for fc in offfset_fcs:
+for fc in offset_fcs:
x_offset = self.relu(fc(x_offset))
for fc in cls_fcs:
x_cls = self.relu(fc(x_cls))

@@ -304,7 +304,7 @@ class FCNMaskHead(nn.Module):
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
-"""Paste instance masks acoording to boxes.
+"""Paste instance masks according to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/

@@ -49,7 +49,7 @@ class MaskPointHead(nn.Module):
super().__init__()
self.num_fcs = num_fcs
self.in_channels = in_channels
-self.fc_channles = fc_channels
+self.fc_channels = fc_channels
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.coarse_pred_each_layer = coarse_pred_each_layer

@@ -190,10 +190,10 @@ class SparseRoIHead(CascadeRoIHead):
cls_pred_list = bbox_results['detach_cls_score_list']
proposal_list = bbox_results['detach_proposal_list']
for i in range(num_imgs):
-normolize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] /
+normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] /
imgs_whwh[i])
assign_result = self.bbox_assigner[stage].assign(
-normolize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i],
+normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i],
gt_labels[i], img_metas[i])
sampling_result = self.bbox_sampler[stage].sample(
assign_result, proposal_list[i], gt_bboxes[i])

@@ -109,7 +109,7 @@ class FFN(nn.Module):
`MultiheadAttention`.
feedforward_channels (int): The hidden dimension of FFNs.
num_fcs (int, optional): The number of fully-connected layers in
-FFNs. Defaluts to 2.
+FFNs. Defaults to 2.
act_cfg (dict, optional): The activation config for FFNs.
dropout (float, optional): Probability of an element to be
zeroed. Default 0.0.
@@ -179,7 +179,7 @@ class TransformerEncoderLayer(nn.Module):
order (tuple[str]): The order for encoder layer. Valid examples are
('selfattn', 'norm', 'ffn', 'norm') and ('norm', 'selfattn',
'norm', 'ffn'). Default ('selfattn', 'norm', 'ffn', 'norm').
-act_cfg (dict): The activation config for FFNs. Defalut ReLU.
+act_cfg (dict): The activation config for FFNs. Default ReLU.
norm_cfg (dict): Config dict for normalization layer. Default
layer normalization.
num_fcs (int): The number of fully-connected layers for FFNs.
@@ -280,7 +280,7 @@ class TransformerDecoderLayer(nn.Module):
('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn', 'norm') and
('norm', 'selfattn', 'norm', 'multiheadattn', 'norm', 'ffn').
Default the former.
-act_cfg (dict): Same as `TransformerEncoderLayer`. Defalut ReLU.
+act_cfg (dict): Same as `TransformerEncoderLayer`. Default ReLU.
norm_cfg (dict): Config dict for normalization layer. Default
layer normalization.
num_fcs (int): The number of fully-connected layers in FFNs.
@@ -412,7 +412,7 @@ class TransformerEncoder(nn.Module):
feedforward_channels (int): Same as `TransformerEncoderLayer`.
dropout (float): Same as `TransformerEncoderLayer`. Default 0.0.
order (tuple[str]): Same as `TransformerEncoderLayer`.
-act_cfg (dict): Same as `TransformerEncoderLayer`. Defalut ReLU.
+act_cfg (dict): Same as `TransformerEncoderLayer`. Default ReLU.
norm_cfg (dict): Same as `TransformerEncoderLayer`. Default
layer normalization.
num_fcs (int): Same as `TransformerEncoderLayer`. Default 2.
@@ -496,7 +496,7 @@ class TransformerDecoder(nn.Module):
feedforward_channels (int): Same as `TransformerDecoderLayer`.
dropout (float): Same as `TransformerDecoderLayer`. Default 0.0.
order (tuple[str]): Same as `TransformerDecoderLayer`.
-act_cfg (dict): Same as `TransformerDecoderLayer`. Defalut ReLU.
+act_cfg (dict): Same as `TransformerDecoderLayer`. Default ReLU.
norm_cfg (dict): Same as `TransformerDecoderLayer`. Default
layer normalization.
num_fcs (int): Same as `TransformerDecoderLayer`. Default 2.
@@ -621,7 +621,7 @@ class Transformer(nn.Module):
encoder and decoder.
dropout (float): Probability of an element to be zeroed. Default 0.0.
act_cfg (dict): Activation config for FFNs used in both encoder
-and decoder. Defalut ReLU.
+and decoder. Default ReLU.
norm_cfg (dict): Config dict for normalization used in both encoder
and decoder. Default layer normalization.
num_fcs (int): The number of fully-connected layers in FFNs, which is
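To make the num_fcs / feedforward_channels wording above concrete, here is a minimal residual FFN of the shape these docstrings describe (a sketch, not the mmdet module):

```python
import torch.nn as nn

class SimpleFFN(nn.Module):
    def __init__(self, embed_dims=256, feedforward_channels=1024,
                 num_fcs=2, dropout=0.0):
        super().__init__()
        layers, in_ch = [], embed_dims
        for _ in range(num_fcs - 1):
            layers += [nn.Linear(in_ch, feedforward_channels),
                       nn.ReLU(inplace=True), nn.Dropout(dropout)]
            in_ch = feedforward_channels
        layers += [nn.Linear(in_ch, embed_dims), nn.Dropout(dropout)]
        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return x + self.layers(x)  # residual connection
```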

@@ -89,7 +89,7 @@ def get_coco_style_results(filename,
print(f'Mean Performance under Corruption [mPC] ({task})')
print_coco_results(mPC)
if 'rPC' in prints:
-print(f'Realtive Performance under Corruption [rPC] ({task})')
+print(f'Relative Performance under Corruption [rPC] ({task})')
print_coco_results(rPC)
else:
if 'P' in prints:
@@ -146,7 +146,7 @@ def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'):
print('Mean Performance under Corruption [mPC] in AP50 = '
f'{np.mean(mPC):0.3f}')
if 'rPC' in prints:
-print('Realtive Performance under Corruption [rPC] in % = '
+print('Relative Performance under Corruption [rPC] in % = '
f'{np.mean(rPC) * 100:0.1f}')
return np.mean(results, axis=2, keepdims=True)
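The two printed metrics follow the robustness benchmark protocol: mPC averages performance over every corruption and severity, and rPC divides that by clean performance. A sketch of the arithmetic (the results layout is an assumption, not taken from this diff):

```python
import numpy as np

def summarize_robustness(results):
    # results[c, s]: AP for corruption c at severity s, with severity 0
    # taken here as the clean (uncorrupted) run.
    P = results[0, 0]              # clean performance
    mPC = np.mean(results[:, 1:])  # mean performance under corruption
    rPC = mPC / P                  # relative performance under corruption
    return P, mPC, rPC
```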

@@ -367,7 +367,7 @@ def main():
mmcv.dump(aggregated_results, eval_results_filename)
if rank == 0:
-# print filan results
+# print final results
print('\nAggregated results:')
prints = args.final_prints
aggregate = args.final_prints_aggregate

@@ -140,7 +140,7 @@ def main():
for split, json_name in set_name.items():
print(f'Converting {split} into {json_name}')
with mmcv.Timer(
-print_tmpl='It tooks {}s to convert Cityscapes annotation'):
+print_tmpl='It took {}s to convert Cityscapes annotation'):
files = collect_files(
osp.join(img_dir, split), osp.join(gt_dir, split))
image_infos = collect_annotations(files, nproc=args.nproc)
