[Style]: Adjust style

Bobholamovic 3 years ago
parent fd4118afe3
commit caf734693c
Changed files:
  1. paddlers/models/cd/models/__init__.py (12 changed lines)
  2. paddlers/models/cd/models/bit.py (5 changed lines)
  3. paddlers/models/cd/models/dsamnet.py (5 changed lines)
  4. paddlers/models/cd/models/dsifn.py (5 changed lines)
  5. paddlers/models/cd/models/layers/attention.py (15 changed lines)
  6. paddlers/models/cd/models/param_init.py (6 changed lines)
  7. paddlers/models/cd/models/snunet.py (5 changed lines)
  8. paddlers/models/cd/models/stanet.py (5 changed lines)
  9. paddlers/models/cd/models/unet_ef.py (10 changed lines)
  10. paddlers/models/cd/models/unet_siamconc.py (10 changed lines)
  11. paddlers/models/cd/models/unet_siamdiff.py (10 changed lines)

paddlers/models/cd/models/__init__.py

@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from .bit import BIT
 from .cdnet import CDNet
-from .unet_ef import UNetEarlyFusion
-from .unet_siamconc import UNetSiamConc
-from .unet_siamdiff import UNetSiamDiff
-from .dsifn import DSIFN
 from .stanet import STANet
+from .bit import BIT
 from .snunet import SNUNet
+from .dsifn import DSIFN
-from .dsamnet import DSAMNet
+from .dsamnet import DSAMNet
+from .unet_ef import UNetEarlyFusion
+from .unet_siamconc import UNetSiamConc
+from .unet_siamdiff import UNetSiamDiff
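
For context, a minimal sketch of how the re-exported models can be imported after this reordering; the package path is taken from the changed-files list above and may differ in other PaddleRS versions:

```python
# Hypothetical usage; the import path follows the file list above.
from paddlers.models.cd.models import BIT, DSIFN, SNUNet

model = BIT(in_channels=3, num_classes=2)  # binary change detection
```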

paddlers/models/cd/models/bit.py

@@ -28,8 +28,8 @@ class BIT(nn.Layer):
     The BIT implementation based on PaddlePaddle.
     The original article refers to
-    H. Chen, et al., "Remote Sensing Image Change Detection With Transformers"
-    (https://arxiv.org/abs/2103.00208)
+    H. Chen, et al., "Remote Sensing Image Change Detection With Transformers"
+    (https://arxiv.org/abs/2103.00208).
     This implementation adopts pretrained encoders, as opposed to the original work where weights are randomly initialized.

@@ -56,6 +56,7 @@ class BIT(nn.Layer):
     Raises:
         ValueError: When an unsupported backbone type is specified, or the number of backbone stages is not 3, 4, or 5.
     """
+
     def __init__(
             self, in_channels, num_classes,
             backbone='resnet18', n_stages=4,
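
For reference, a minimal usage sketch based on the constructor shown above. The two-image forward call is an assumption typical of bi-temporal change detection models; this diff does not show the forward signature:

```python
import paddle
from paddlers.models.cd.models import BIT  # path from the file list above

# in_channels, num_classes, backbone, and n_stages mirror the signature in the diff.
model = BIT(in_channels=3, num_classes=2, backbone='resnet18', n_stages=4)
t1 = paddle.rand([1, 3, 256, 256])  # image at time 1
t2 = paddle.rand([1, 3, 256, 256])  # image at time 2
pred = model(t1, t2)  # change logits; the exact output structure may vary
```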

paddlers/models/cd/models/dsamnet.py

@@ -26,9 +26,9 @@ class DSAMNet(nn.Layer):
     The DSAMNet implementation based on PaddlePaddle.
     The original article refers to
-    Q. Shi, et al., "A Deeply Supervised Attention Metric-Based Network and an Open Aerial Image Dataset for Remote Sensing
+    Q. Shi, et al., "A Deeply Supervised Attention Metric-Based Network and an Open Aerial Image Dataset for Remote Sensing
     Change Detection"
-    (https://ieeexplore.ieee.org/document/9467555)
+    (https://ieeexplore.ieee.org/document/9467555).
     Note that this implementation differs from the original work in two aspects:
     1. We do not use multiple dilation rates in layer 4 of the ResNet backbone.

@@ -40,6 +40,7 @@ class DSAMNet(nn.Layer):
         ca_ratio (int, optional): The channel reduction ratio for the channel attention module. Default: 8.
         sa_kernel (int, optional): The size of the convolutional kernel used in the spatial attention module. Default: 7.
     """
+
     def __init__(self, in_channels, num_classes, ca_ratio=8, sa_kernel=7):
         super().__init__()
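
A similar hedged sketch for DSAMNet, using the documented defaults for the attention hyperparameters (the bi-temporal call is again an assumption):

```python
import paddle
from paddlers.models.cd.models import DSAMNet  # path from the file list above

# ca_ratio and sa_kernel default to the documented values 8 and 7.
model = DSAMNet(in_channels=3, num_classes=2, ca_ratio=8, sa_kernel=7)
pred = model(paddle.rand([1, 3, 256, 256]), paddle.rand([1, 3, 256, 256]))
```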

paddlers/models/cd/models/dsifn.py

@@ -26,9 +26,9 @@ class DSIFN(nn.Layer):
     The DSIFN implementation based on PaddlePaddle.
     The original article refers to
-    C. Zhang, et al., "A deeply supervised image fusion network for change detection in high resolution bi-temporal remote
+    C. Zhang, et al., "A deeply supervised image fusion network for change detection in high resolution bi-temporal remote
     sensing images"
-    (https://www.sciencedirect.com/science/article/pii/S0924271620301532)
+    (https://www.sciencedirect.com/science/article/pii/S0924271620301532).
     Note that in this implementation, there is a flexible number of target classes.

@@ -37,6 +37,7 @@ class DSIFN(nn.Layer):
         use_dropout (bool, optional): A bool value that indicates whether to use dropout layers. When the model is trained
             on a relatively small dataset, the dropout layers help prevent overfitting. Default: False.
     """
+
     def __init__(self, num_classes, use_dropout=False):
         super().__init__()
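
A hedged sketch for DSIFN. Note that the signature shown above takes no in_channels argument, so three-band inputs are assumed here:

```python
import paddle
from paddlers.models.cd.models import DSIFN  # path from the file list above

# use_dropout=True is the documented option for small training sets.
model = DSIFN(num_classes=2, use_dropout=True)
pred = model(paddle.rand([1, 3, 256, 256]), paddle.rand([1, 3, 256, 256]))
```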

paddlers/models/cd/models/layers/attention.py

@@ -25,13 +25,14 @@ class ChannelAttention(nn.Layer):
     The channel attention module implementation based on PaddlePaddle.
     The original article refers to
-    Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
-    (https://arxiv.org/abs/1807.06521)
+    Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
+    (https://arxiv.org/abs/1807.06521).
     Args:
         in_ch (int): The number of channels of the input features.
         ratio (int, optional): The channel reduction ratio. Default: 8.
     """
+
     def __init__(self, in_ch, ratio=8):
         super().__init__()
         self.avg_pool = nn.AdaptiveAvgPool2D(1)

@@ -51,12 +52,13 @@ class SpatialAttention(nn.Layer):
     The spatial attention module implementation based on PaddlePaddle.
     The original article refers to
-    Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
-    (https://arxiv.org/abs/1807.06521)
+    Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
+    (https://arxiv.org/abs/1807.06521).
     Args:
         kernel_size (int, optional): The size of the convolutional kernel. Default: 7.
     """
+
     def __init__(self, kernel_size=7):
         super().__init__()
         self.conv = BasicConv(2, 1, kernel_size, bias=False)

@@ -74,14 +76,15 @@ class CBAM(nn.Layer):
     The CBAM implementation based on PaddlePaddle.
     The original article refers to
-    Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
-    (https://arxiv.org/abs/1807.06521)
+    Sanghyun Woo, et al., "CBAM: Convolutional Block Attention Module"
+    (https://arxiv.org/abs/1807.06521).
     Args:
         in_ch (int): The number of channels of the input features.
         ratio (int, optional): The channel reduction ratio for the channel attention module. Default: 8.
         kernel_size (int, optional): The size of the convolutional kernel used in the spatial attention module. Default: 7.
     """
+
     def __init__(self, in_ch, ratio=8, kernel_size=7):
         super().__init__()
         self.ca = ChannelAttention(in_ch, ratio=ratio)
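
To illustrate how the three modules compose, here is a hedged sketch applying CBAM to a feature map. The assumption that the module returns a reweighted tensor of the input's shape follows the CBAM paper, not this diff:

```python
import paddle
from paddlers.models.cd.models.layers.attention import CBAM  # path from the file list above

feats = paddle.rand([1, 64, 32, 32])           # example feature map
cbam = CBAM(in_ch=64, ratio=8, kernel_size=7)  # channel then spatial attention
refined = cbam(feats)                          # expected: same shape as feats
```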

paddlers/models/cd/models/param_init.py

@@ -26,6 +26,7 @@ def normal_init(param, *args, **kwargs):
     Returns:
         The initialized parameters.
     """
+
     return nn.initializer.Normal(*args, **kwargs)(param)

@@ -34,7 +35,7 @@ def kaiming_normal_init(param, *args, **kwargs):
     Initialize parameters with the Kaiming normal distribution.
     For more information about the Kaiming initialization method, please refer to
-    https://arxiv.org/abs/1502.01852
+    https://arxiv.org/abs/1502.01852
     Args:
         param (Tensor): The tensor that needs to be initialized.

@@ -42,6 +43,7 @@ def kaiming_normal_init(param, *args, **kwargs):
     Returns:
         The initialized parameters.
     """
+
     return nn.initializer.KaimingNormal(*args, **kwargs)(param)

@@ -55,6 +57,7 @@ def constant_init(param, *args, **kwargs):
     Returns:
         The initialized parameters.
     """
+
     return nn.initializer.Constant(*args, **kwargs)(param)

@@ -73,6 +76,7 @@ class KaimingInitMixin:
             self.bn = nn.BatchNorm2D(num_classes)
             self.init_weight()
     """
+
     def init_weight(self):
         for layer in self.sublayers():
             if isinstance(layer, nn.Conv2D):
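
Since the helper bodies are fully visible in this diff, here is a self-contained sketch of the pattern they rely on: Paddle initializer objects are callables that can be applied directly to a parameter.

```python
import paddle.nn as nn

def kaiming_normal_init(param, *args, **kwargs):
    # Same body as shown in the diff above.
    return nn.initializer.KaimingNormal(*args, **kwargs)(param)

def constant_init(param, *args, **kwargs):
    # Same body as shown in the diff above.
    return nn.initializer.Constant(*args, **kwargs)(param)

conv = nn.Conv2D(3, 16, kernel_size=3)
kaiming_normal_init(conv.weight)     # Kaiming normal for conv weights
constant_init(conv.bias, value=0.0)  # zero-initialize biases
```

This is also the pattern that KaimingInitMixin.init_weight automates across all Conv2D sublayers of a model.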

paddlers/models/cd/models/snunet.py

@@ -27,8 +27,8 @@ class SNUNet(nn.Layer, KaimingInitMixin):
     The SNUNet implementation based on PaddlePaddle.
     The original article refers to
-    S. Fang, et al., "SNUNet-CD: A Densely Connected Siamese Network for Change Detection of VHR Images"
-    (https://ieeexplore.ieee.org/document/9355573)
+    S. Fang, et al., "SNUNet-CD: A Densely Connected Siamese Network for Change Detection of VHR Images"
+    (https://ieeexplore.ieee.org/document/9355573).
     Note that bilinear interpolation is adopted as the upsampling method, which is different from the paper.

@@ -37,6 +37,7 @@ class SNUNet(nn.Layer, KaimingInitMixin):
         num_classes (int): The number of target classes.
         width (int, optional): The output channels of the first convolutional layer. Default: 32.
     """
+
     def __init__(self, in_channels, num_classes, width=32):
         super().__init__()
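
A hedged usage sketch; `width` controls the first convolution's output channels as documented, and the bi-temporal forward call is an assumption:

```python
import paddle
from paddlers.models.cd.models import SNUNet  # path from the file list above

model = SNUNet(in_channels=3, num_classes=2, width=32)
pred = model(paddle.rand([1, 3, 256, 256]), paddle.rand([1, 3, 256, 256]))
```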

paddlers/models/cd/models/stanet.py

@@ -27,8 +27,8 @@ class STANet(nn.Layer):
     The STANet implementation based on PaddlePaddle.
     The original article refers to
-    H. Chen and Z. Shi, "A Spatial-Temporal Attention-Based Method and a New Dataset for Remote Sensing Image Change Detection"
-    (https://www.mdpi.com/2072-4292/12/10/1662)
+    H. Chen and Z. Shi, "A Spatial-Temporal Attention-Based Method and a New Dataset for Remote Sensing Image Change Detection"
+    (https://www.mdpi.com/2072-4292/12/10/1662).
     Note that this implementation differs from the original work in two aspects:
     1. We do not use multiple dilation rates in layer 4 of the ResNet backbone.

@@ -45,6 +45,7 @@ class STANet(nn.Layer):
     Raises:
         ValueError: When `att_type` has an illegal value (unsupported attention type).
     """
+
     def __init__(
             self,
             in_channels,
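
A hedged sketch; passing an unsupported `att_type` should raise the ValueError documented above. 'BAM' as a valid value is an assumption drawn from the paper's attention variants, not from this diff:

```python
import paddle
from paddlers.models.cd.models import STANet  # path from the file list above

model = STANet(in_channels=3, num_classes=2, att_type='BAM')  # att_type assumed
pred = model(paddle.rand([1, 3, 256, 256]), paddle.rand([1, 3, 256, 256]))
```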

paddlers/models/cd/models/unet_ef.py

@@ -26,16 +26,16 @@ class UNetEarlyFusion(nn.Layer):
     The FC-EF implementation based on PaddlePaddle.
     The original article refers to
-    Caye Daudt, R., et al. "Fully convolutional siamese networks for change detection"
-    (https://arxiv.org/abs/1810.08462)
+    Caye Daudt, R., et al. "Fully convolutional siamese networks for change detection"
+    (https://arxiv.org/abs/1810.08462).
     Args:
         in_channels (int): The number of bands of the input images.
         num_classes (int): The number of target classes.
-        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers.
-            When the model is trained on a relatively small dataset, the dropout layers help prevent
-            overfitting. Default: False.
+        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers. When the model is trained
+            on a relatively small dataset, the dropout layers help prevent overfitting. Default: False.
     """
+
     def __init__(
             self,
             in_channels,
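
A hedged sketch for FC-EF. Early fusion concatenates the two dates inside the model, so it is assumed here that the forward pass still takes the two images separately:

```python
import paddle
from paddlers.models.cd.models import UNetEarlyFusion  # path from the file list above

model = UNetEarlyFusion(in_channels=3, num_classes=2, use_dropout=False)
pred = model(paddle.rand([1, 3, 256, 256]), paddle.rand([1, 3, 256, 256]))
```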

paddlers/models/cd/models/unet_siamconc.py

@@ -26,16 +26,16 @@ class UNetSiamConc(nn.Layer):
     The FC-Siam-conc implementation based on PaddlePaddle.
     The original article refers to
-    Caye Daudt, R., et al. "Fully convolutional siamese networks for change detection"
-    (https://arxiv.org/abs/1810.08462)
+    Caye Daudt, R., et al. "Fully convolutional siamese networks for change detection"
+    (https://arxiv.org/abs/1810.08462).
     Args:
         in_channels (int): The number of bands of the input images.
         num_classes (int): The number of target classes.
-        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers.
-            When the model is trained on a relatively small dataset, the dropout layers help prevent
-            overfitting. Default: False.
+        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers. When the model is trained
+            on a relatively small dataset, the dropout layers help prevent overfitting. Default: False.
     """
+
     def __init__(
             self,
             in_channels,
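
The same sketch applies to FC-Siam-conc, which differs from FC-EF by encoding each date with shared weights and concatenating skip features (constructor arguments as documented above; bi-temporal call assumed):

```python
import paddle
from paddlers.models.cd.models import UNetSiamConc  # path from the file list above

model = UNetSiamConc(in_channels=3, num_classes=2, use_dropout=True)
pred = model(paddle.rand([1, 3, 256, 256]), paddle.rand([1, 3, 256, 256]))
```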

paddlers/models/cd/models/unet_siamdiff.py

@@ -26,16 +26,16 @@ class UNetSiamDiff(nn.Layer):
     The FC-Siam-diff implementation based on PaddlePaddle.
     The original article refers to
-    Caye Daudt, R., et al. "Fully convolutional siamese networks for change detection"
-    (https://arxiv.org/abs/1810.08462)
+    Caye Daudt, R., et al. "Fully convolutional siamese networks for change detection"
+    (https://arxiv.org/abs/1810.08462).
     Args:
         in_channels (int): The number of bands of the input images.
         num_classes (int): The number of target classes.
-        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers.
-            When the model is trained on a relatively small dataset, the dropout layers help prevent
-            overfitting. Default: False.
+        use_dropout (bool, optional): A bool value that indicates whether to use dropout layers. When the model is trained
+            on a relatively small dataset, the dropout layers help prevent overfitting. Default: False.
     """
+
     def __init__(
             self,
             in_channels,
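
And for FC-Siam-diff, which takes the absolute difference of skip features instead of concatenating them (same documented constructor; bi-temporal call assumed):

```python
import paddle
from paddlers.models.cd.models import UNetSiamDiff  # path from the file list above

model = UNetSiamDiff(in_channels=3, num_classes=2, use_dropout=True)
pred = model(paddle.rand([1, 3, 256, 256]), paddle.rand([1, 3, 256, 256]))
```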
