diff --git a/ultralytics/cfg/models/v8/yolov8-2.yaml b/ultralytics/cfg/models/v8/yolov8-2.yaml
new file mode 100644
index 0000000000..25011d6e58
--- /dev/null
+++ b/ultralytics/cfg/models/v8/yolov8-2.yaml
@@ -0,0 +1,46 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
+
+# Parameters
+nc: 80 # number of classes
+scales: # model compound scaling constants, i.e. 'model=yolov8n-2.yaml' will call yolov8-2.yaml with scale 'n'
+  # [depth, width, max_channels]
+  n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs
+  s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs
+  m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs
+  l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs
+  x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs
+
+# YOLOv8.0n backbone
+backbone:
+  # [from, repeats, module, args]
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 3, C2fn, [128, True]]
+  - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+  - [-1, 6, C2fn, [256, True]]
+  - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+  - [-1, 6, C2fn, [512, True]]
+  - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+  - [-1, 3, C2fn, [1024, True]]
+  - [-1, 1, SPPF, [1024, 5]] # 9
+
+# YOLOv8.0n head
+head:
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 3, C2fn, [512]] # 12
+
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 3, C2fn, [256]] # 15 (P3/8-small)
+
+  - [-1, 1, Conv, [256, 3, 2]]
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 3, C2fn, [512]] # 18 (P4/16-medium)
+
+  - [-1, 1, Conv, [512, 3, 2]]
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 3, C2fn, [1024]] # 21 (P5/32-large)
+
+  - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
diff --git a/ultralytics/nn/modules/__init__.py b/ultralytics/nn/modules/__init__.py
index d785c008c8..1dd321602a 100644
--- a/ultralytics/nn/modules/__init__.py
+++ b/ultralytics/nn/modules/__init__.py
@@ -46,6 +46,7 @@ from .block import (
     CBFuse,
     CBLinear,
     Silence,
+    C2fn,
 )
 from .conv import (
     CBAM,
diff --git a/ultralytics/nn/modules/block.py b/ultralytics/nn/modules/block.py
index c772f8127e..90354021fd 100644
--- a/ultralytics/nn/modules/block.py
+++ b/ultralytics/nn/modules/block.py
@@ -37,6 +37,7 @@ __all__ = (
     "CBFuse",
     "CBLinear",
     "Silence",
+    "C2fn",
 )
 
 
@@ -238,6 +239,32 @@ class C2f(nn.Module):
         return self.cv2(torch.cat(y, 1))
 
 
+class C2fn(nn.Module):
+    """C2f variant in which each inner Bottleneck is replaced by a full C2f block (nested C2f)."""
+
+    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
+        """Initialize the C2fn layer with ch_in, ch_out, number of repeats, shortcut, groups and expansion
+        arguments.
+        """
+        super().__init__()
+        self.c = int(c2 * e)  # hidden channels
+        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
+        self.cv2 = Conv((2 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
+        self.m = nn.ModuleList(C2f(self.c, self.c, 1, shortcut, g) for _ in range(n))  # nested C2f blocks
+
+    def forward(self, x):
+        """Forward pass through the C2fn layer."""
+        y = list(self.cv1(x).chunk(2, 1))
+        y.extend(m(y[-1]) for m in self.m)
+        return self.cv2(torch.cat(y, 1))
+
+    def forward_split(self, x):
+        """Forward pass using split() instead of chunk()."""
+        y = list(self.cv1(x).split((self.c, self.c), 1))
+        y.extend(m(y[-1]) for m in self.m)
+        return self.cv2(torch.cat(y, 1))
+
+
 class C3(nn.Module):
     """CSP Bottleneck with 3 convolutions."""
 
diff --git a/ultralytics/nn/tasks.py b/ultralytics/nn/tasks.py
index 64ee7f5031..084360bfb5 100644
--- a/ultralytics/nn/tasks.py
+++ b/ultralytics/nn/tasks.py
@@ -19,6 +19,7 @@ from ultralytics.nn.modules import (
     Bottleneck,
     BottleneckCSP,
     C2f,
+    C2fn,
     C2fAttn,
     ImagePoolingAttn,
     C3Ghost,
@@ -856,6 +857,7 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
             C1,
             C2,
             C2f,
+            C2fn,
             RepNCSPELAN4,
             ADown,
             SPPELAN,
@@ -878,7 +880,7 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
                 )  # num heads
 
             args = [c1, c2, *args[1:]]
-            if m in (BottleneckCSP, C1, C2, C2f, C2fAttn, C3, C3TR, C3Ghost, C3x, RepC3):
+            if m in (BottleneckCSP, C1, C2, C2f, C2fn, C2fAttn, C3, C3TR, C3Ghost, C3x, RepC3):
                 args.insert(2, n)  # number of repeats
                 n = 1
         elif m is AIFI: