@@ -26,12 +26,13 @@ class ChannelAttention(nn.Layer):
 
     The original article refers to
     Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
-    (https://arxiv.org/abs/1807.06521)
+    (https://arxiv.org/abs/1807.06521).
+
     Args:
         in_ch (int): The number of channels of the input features.
         ratio (int, optional): The channel reduction ratio. Default: 8.
     """
 
     def __init__(self, in_ch, ratio=8):
         super().__init__()
         self.avg_pool = nn.AdaptiveAvgPool2D(1)
@@ -52,11 +53,12 @@ class SpatialAttention(nn.Layer):
 
     The original article refers to
     Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
-    (https://arxiv.org/abs/1807.06521)
+    (https://arxiv.org/abs/1807.06521).
+
     Args:
         kernel_size (int, optional): The size of the convolutional kernel. Default: 7.
     """
 
     def __init__(self, kernel_size=7):
         super().__init__()
         self.conv = BasicConv(2, 1, kernel_size, bias=False)
@@ -75,13 +77,14 @@ class CBAM(nn.Layer):
 
     The original article refers to
     Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
-    (https://arxiv.org/abs/1807.06521)
+    (https://arxiv.org/abs/1807.06521).
+
     Args:
         in_ch (int): The number of channels of the input features.
         ratio (int, optional): The channel reduction ratio for the channel attention module. Default: 8.
         kernel_size (int, optional): The size of the convolutional kernel used in the spatial attention module. Default: 7.
     """
 
     def __init__(self, in_ch, ratio=8, kernel_size=7):
         super().__init__()
         self.ca = ChannelAttention(in_ch, ratio=ratio)
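
For context on how the three layers documented above fit together, here is a minimal, self-contained sketch in the spirit of the CBAM paper (https://arxiv.org/abs/1807.06521). It reuses the class names and the `in_ch`/`ratio`/`kernel_size` arguments from the docstrings, but the layer bodies (the shared MLP, the use of `nn.Conv2D` in place of `BasicConv`, and the forward passes) are assumptions for illustration, not the repository's implementation.

```python
import paddle
import paddle.nn as nn
import paddle.nn.functional as F


class ChannelAttention(nn.Layer):
    def __init__(self, in_ch, ratio=8):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2D(1)
        self.max_pool = nn.AdaptiveMaxPool2D(1)
        # Shared MLP implemented with 1x1 convolutions (assumption per the paper).
        self.mlp = nn.Sequential(
            nn.Conv2D(in_ch, in_ch // ratio, 1, bias_attr=False),
            nn.ReLU(),
            nn.Conv2D(in_ch // ratio, in_ch, 1, bias_attr=False))

    def forward(self, x):
        # Channel attention map from pooled descriptors, shape [N, C, 1, 1].
        return F.sigmoid(self.mlp(self.avg_pool(x)) + self.mlp(self.max_pool(x)))


class SpatialAttention(nn.Layer):
    def __init__(self, kernel_size=7):
        super().__init__()
        self.conv = nn.Conv2D(
            2, 1, kernel_size, padding=kernel_size // 2, bias_attr=False)

    def forward(self, x):
        # Stack channel-wise average and max maps, then convolve to one map [N, 1, H, W].
        avg_map = paddle.mean(x, axis=1, keepdim=True)
        max_map = paddle.max(x, axis=1, keepdim=True)
        return F.sigmoid(self.conv(paddle.concat([avg_map, max_map], axis=1)))


class CBAM(nn.Layer):
    def __init__(self, in_ch, ratio=8, kernel_size=7):
        super().__init__()
        self.ca = ChannelAttention(in_ch, ratio=ratio)
        self.sa = SpatialAttention(kernel_size=kernel_size)

    def forward(self, x):
        # Sequential composition as in the paper: channel attention, then spatial.
        x = self.ca(x) * x
        x = self.sa(x) * x
        return x


if __name__ == "__main__":
    feats = paddle.randn([2, 32, 64, 64])   # N, C, H, W
    out = CBAM(32)(feats)
    print(out.shape)                         # [2, 32, 64, 64]
```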