[Style]: Adjust style

Bobholamovic 3 years ago
parent fd4118afe3
commit caf734693c
Changed files:

  1. paddlers/models/cd/models/__init__.py (10 changed lines)
  2. paddlers/models/cd/models/bit.py (3 changed lines)
  3. paddlers/models/cd/models/dsamnet.py (3 changed lines)
  4. paddlers/models/cd/models/dsifn.py (3 changed lines)
  5. paddlers/models/cd/models/layers/attention.py (9 changed lines)
  6. paddlers/models/cd/models/param_init.py (4 changed lines)
  7. paddlers/models/cd/models/snunet.py (3 changed lines)
  8. paddlers/models/cd/models/stanet.py (3 changed lines)
  9. paddlers/models/cd/models/unet_ef.py (8 changed lines)
  10. paddlers/models/cd/models/unet_siamconc.py (8 changed lines)
  11. paddlers/models/cd/models/unet_siamdiff.py (8 changed lines)

paddlers/models/cd/models/__init__.py

@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from .bit import BIT
 from .cdnet import CDNet
-from .unet_ef import UNetEarlyFusion
-from .unet_siamconc import UNetSiamConc
-from .unet_siamdiff import UNetSiamDiff
+from .dsifn import DSIFN
 from .stanet import STANet
-from .bit import BIT
 from .snunet import SNUNet
-from .dsifn import DSIFN
 from .dsamnet import DSAMNet
+from .unet_ef import UNetEarlyFusion
+from .unet_siamconc import UNetSiamConc
+from .unet_siamdiff import UNetSiamDiff
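The hunk above only reorders imports; every model name remains importable from the package root. A minimal import sanity check (assuming paddlers and its PaddlePaddle dependency are installed):

    # All change-detection models re-exported by this __init__.py.
    from paddlers.models.cd.models import (
        BIT, CDNet, DSIFN, STANet, SNUNet, DSAMNet,
        UNetEarlyFusion, UNetSiamConc, UNetSiamDiff)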

paddlers/models/cd/models/bit.py

@@ -29,7 +29,7 @@ class BIT(nn.Layer):
     The original article refers to
         H. Chen, et al., "Remote Sensing Image Change Detection With Transformers"
-        (https://arxiv.org/abs/2103.00208)
+        (https://arxiv.org/abs/2103.00208).
     This implementation adopts pretrained encoders, as opposed to the original work where weights are randomly initialized.
@@ -56,6 +56,7 @@ class BIT(nn.Layer):
     Raises:
         ValueError: When an unsupported backbone type is specified, or the number of backbone stages is not 3, 4, or 5.
     """
+
     def __init__(
             self, in_channels, num_classes,
             backbone='resnet18', n_stages=4,
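For quick reference, a construction sketch that matches the signature shown in this hunk. The bitemporal call convention (two image tensors in) is an assumption about the surrounding codebase, not something this diff shows:

    import paddle
    from paddlers.models.cd.models import BIT

    # Arguments taken from the hunk above; any remaining defaults are assumed.
    model = BIT(in_channels=3, num_classes=2, backbone='resnet18', n_stages=4)
    t1 = paddle.rand([1, 3, 256, 256])  # pre-event image batch
    t2 = paddle.rand([1, 3, 256, 256])  # post-event image batch
    out = model(t1, t2)  # assumed forward over a bitemporal pair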

paddlers/models/cd/models/dsamnet.py

@@ -28,7 +28,7 @@ class DSAMNet(nn.Layer):
     The original article refers to
         Q. Shi, et al., "A Deeply Supervised Attention Metric-Based Network and an Open Aerial Image Dataset for Remote Sensing
         Change Detection"
-        (https://ieeexplore.ieee.org/document/9467555)
+        (https://ieeexplore.ieee.org/document/9467555).
     Note that this implementation differs from the original work in two aspects:
     1. We do not use multiple dilation rates in layer 4 of the ResNet backbone.
@@ -40,6 +40,7 @@ class DSAMNet(nn.Layer):
         ca_ratio (int, optional): The channel reduction ratio for the channel attention module. Default: 8.
         sa_kernel (int, optional): The size of the convolutional kernel used in the spatial attention module. Default: 7.
     """
+
     def __init__(self, in_channels, num_classes, ca_ratio=8, sa_kernel=7):
         super().__init__()
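A minimal construction sketch consistent with the documented arguments (defaults per the docstring; paddlers installation assumed):

    from paddlers.models.cd.models import DSAMNet

    # ca_ratio and sa_kernel mirror the documented defaults.
    model = DSAMNet(in_channels=3, num_classes=2, ca_ratio=8, sa_kernel=7)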

paddlers/models/cd/models/dsifn.py

@@ -28,7 +28,7 @@ class DSIFN(nn.Layer):
     The original article refers to
         C. Zhang, et al., "A deeply supervised image fusion network for change detection in high resolution bi-temporal remote
         sensing images"
-        (https://www.sciencedirect.com/science/article/pii/S0924271620301532)
+        (https://www.sciencedirect.com/science/article/pii/S0924271620301532).
     Note that in this implementation, there is a flexible number of target classes.
@@ -37,6 +37,7 @@ class DSIFN(nn.Layer):
         use_dropout (bool, optional): A bool value that indicates whether to use dropout layers. When the model is trained
             on a relatively small dataset, the dropout layers help prevent overfitting. Default: False.
     """
+
     def __init__(self, num_classes, use_dropout=False):
         super().__init__()
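Note that, per the docstring, DSIFN takes no in_channels argument. A minimal sketch under that reading:

    from paddlers.models.cd.models import DSIFN

    # Only the two documented arguments are passed.
    model = DSIFN(num_classes=2, use_dropout=False)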

paddlers/models/cd/models/layers/attention.py

@@ -26,12 +26,13 @@ class ChannelAttention(nn.Layer):
     The original article refers to
         Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
-        (https://arxiv.org/abs/1807.06521)
+        (https://arxiv.org/abs/1807.06521).
     Args:
         in_ch (int): The number of channels of the input features.
         ratio (int, optional): The channel reduction ratio. Default: 8.
     """
+
     def __init__(self, in_ch, ratio=8):
         super().__init__()
         self.avg_pool = nn.AdaptiveAvgPool2D(1)
@@ -52,11 +53,12 @@ class SpatialAttention(nn.Layer):
     The original article refers to
         Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
-        (https://arxiv.org/abs/1807.06521)
+        (https://arxiv.org/abs/1807.06521).
     Args:
         kernel_size (int, optional): The size of the convolutional kernel. Default: 7.
     """
+
     def __init__(self, kernel_size=7):
         super().__init__()
         self.conv = BasicConv(2, 1, kernel_size, bias=False)
@@ -75,13 +77,14 @@ class CBAM(nn.Layer):
     The original article refers to
         Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
-        (https://arxiv.org/abs/1807.06521)
+        (https://arxiv.org/abs/1807.06521).
     Args:
         in_ch (int): The number of channels of the input features.
         ratio (int, optional): The channel reduction ratio for the channel attention module. Default: 8.
         kernel_size (int, optional): The size of the convolutional kernel used in the spatial attention module. Default: 7.
     """
+
     def __init__(self, in_ch, ratio=8, kernel_size=7):
         super().__init__()
         self.ca = ChannelAttention(in_ch, ratio=ratio)
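A usage sketch for the CBAM block; the import path is inferred from the file location above, and the output matching the input shape is an assumption:

    import paddle
    from paddlers.models.cd.models.layers.attention import (
        ChannelAttention, SpatialAttention, CBAM)

    x = paddle.rand([1, 64, 32, 32])
    cbam = CBAM(in_ch=64, ratio=8, kernel_size=7)
    y = cbam(x)  # channel attention followed by spatial attention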

paddlers/models/cd/models/param_init.py

@@ -26,6 +26,7 @@ def normal_init(param, *args, **kwargs):
     Returns:
         The initialized parameters.
     """
+
     return nn.initializer.Normal(*args, **kwargs)(param)
@@ -42,6 +43,7 @@ def kaiming_normal_init(param, *args, **kwargs):
     Returns:
         The initialized parameters.
     """
+
     return nn.initializer.KaimingNormal(*args, **kwargs)(param)
@@ -55,6 +57,7 @@ def constant_init(param, *args, **kwargs):
     Returns:
         The initialized parameters.
     """
+
     return nn.initializer.Constant(*args, **kwargs)(param)
@@ -73,6 +76,7 @@ class KaimingInitMixin:
                 self.bn = nn.BatchNorm2D(num_classes)
                 self.init_weight()
     """
+
     def init_weight(self):
         for layer in self.sublayers():
            if isinstance(layer, nn.Conv2D):
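The mixin pattern from the docstring above, fleshed out into a self-contained sketch. The import path is inferred from the file location, and CustomHead is a hypothetical example class:

    import paddle.nn as nn
    from paddlers.models.cd.models.param_init import (
        KaimingInitMixin, constant_init)

    class CustomHead(nn.Layer, KaimingInitMixin):
        def __init__(self, num_classes):
            super().__init__()
            self.conv = nn.Conv2D(16, num_classes, 3)
            self.bn = nn.BatchNorm2D(num_classes)
            self.init_weight()  # Kaiming-initializes the Conv2D sublayers

    # The free functions wrap Paddle initializers directly:
    head = CustomHead(num_classes=2)
    constant_init(head.bn.weight, value=1.0)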

paddlers/models/cd/models/snunet.py

@@ -28,7 +28,7 @@ class SNUNet(nn.Layer, KaimingInitMixin):
     The original article refers to
         S. Fang, et al., "SNUNet-CD: A Densely Connected Siamese Network for Change Detection of VHR Images"
-        (https://ieeexplore.ieee.org/document/9355573)
+        (https://ieeexplore.ieee.org/document/9355573).
     Note that bilinear interpolation is adopted as the upsampling method, which is different from the paper.
@@ -37,6 +37,7 @@ class SNUNet(nn.Layer, KaimingInitMixin):
         num_classes (int): The number of target classes.
         width (int, optional): The output channels of the first convolutional layer. Default: 32.
     """
+
     def __init__(self, in_channels, num_classes, width=32):
         super().__init__()
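A construction sketch matching the documented signature (width being the output channel count of the first convolution, per the docstring):

    from paddlers.models.cd.models import SNUNet

    model = SNUNet(in_channels=3, num_classes=2, width=32)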

paddlers/models/cd/models/stanet.py

@@ -28,7 +28,7 @@ class STANet(nn.Layer):
     The original article refers to
         H. Chen and Z. Shi, "A Spatial-Temporal Attention-Based Method and a New Dataset for Remote Sensing Image Change Detection"
-        (https://www.mdpi.com/2072-4292/12/10/1662)
+        (https://www.mdpi.com/2072-4292/12/10/1662).
     Note that this implementation differs from the original work in two aspects:
     1. We do not use multiple dilation rates in layer 4 of the ResNet backbone.
@@ -45,6 +45,7 @@ class STANet(nn.Layer):
     Raises:
         ValueError: When `att_type` has an illeagal value (unsupported attention type).
     """
+
     def __init__(
             self,
             in_channels,
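A construction sketch matching the signature above. The 'BAM' value for att_type is an assumed example; the diff only shows that an unsupported value raises ValueError:

    from paddlers.models.cd.models import STANet

    # att_type='BAM' is illustrative; unsupported values raise ValueError.
    model = STANet(in_channels=3, num_classes=2, att_type='BAM')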

paddlers/models/cd/models/unet_ef.py

@@ -27,15 +27,15 @@ class UNetEarlyFusion(nn.Layer):
     The original article refers to
         Caye Daudt, R., et al. "Fully convolutional siamese networks for change detection"
-        (https://arxiv.org/abs/1810.08462)
+        (https://arxiv.org/abs/1810.08462).
     Args:
         in_channels (int): The number of bands of the input images.
         num_classes (int): The number of target classes.
-        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers.
-            When the model is trained on a relatively small dataset, the dropout layers help prevent
-            overfitting. Default: False.
+        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers. When the model is trained
+            on a relatively small dataset, the dropout layers help prevent overfitting. Default: False.
     """
+
     def __init__(
             self,
             in_channels,

paddlers/models/cd/models/unet_siamconc.py

@@ -27,15 +27,15 @@ class UNetSiamConc(nn.Layer):
     The original article refers to
         Caye Daudt, R., et al. "Fully convolutional siamese networks for change detection"
-        (https://arxiv.org/abs/1810.08462)
+        (https://arxiv.org/abs/1810.08462).
     Args:
         in_channels (int): The number of bands of the input images.
         num_classes (int): The number of target classes.
-        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers.
-            When the model is trained on a relatively small dataset, the dropout layers help prevent
-            overfitting. Default: False.
+        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers. When the model is trained
+            on a relatively small dataset, the dropout layers help prevent overfitting. Default: False.
     """
+
     def __init__(
             self,
             in_channels,

paddlers/models/cd/models/unet_siamdiff.py

@@ -27,15 +27,15 @@ class UNetSiamDiff(nn.Layer):
     The original article refers to
         Caye Daudt, R., et al. "Fully convolutional siamese networks for change detection"
-        (https://arxiv.org/abs/1810.08462)
+        (https://arxiv.org/abs/1810.08462).
     Args:
         in_channels (int): The number of bands of the input images.
         num_classes (int): The number of target classes.
-        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers.
-            When the model is trained on a relatively small dataset, the dropout layers help prevent
-            overfitting. Default: False.
+        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers. When the model is trained
+            on a relatively small dataset, the dropout layers help prevent overfitting. Default: False.
     """
+
     def __init__(
             self,
             in_channels,
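The three U-Net change-detection variants above share one documented signature, so a single sketch covers them (paddlers installation assumed):

    from paddlers.models.cd.models import (
        UNetEarlyFusion, UNetSiamConc, UNetSiamDiff)

    # All three accept the same documented arguments.
    for cls in (UNetEarlyFusion, UNetSiamConc, UNetSiamDiff):
        model = cls(in_channels=3, num_classes=2, use_dropout=False)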
