parent: bfd2554f98
commit: 32babb3418
6 changed files with 459 additions and 2 deletions
@@ -0,0 +1,15 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .blocks import *
@@ -0,0 +1,140 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.nn as nn

__all__ = [
    'BasicConv', 'Conv1x1', 'Conv3x3', 'Conv7x7',
    'MaxPool2x2', 'MaxUnPool2x2',
    'ConvTransposed3x3',
    'Identity'
]


def get_norm_layer():
    # TODO: select appropriate norm layer.
    return nn.BatchNorm2D


def get_act_layer():
    # TODO: select appropriate activation layer.
    return nn.ReLU


def make_norm(*args, **kwargs):
    norm_layer = get_norm_layer()
    return norm_layer(*args, **kwargs)


def make_act(*args, **kwargs):
    act_layer = get_act_layer()
    return act_layer(*args, **kwargs)


class BasicConv(nn.Layer):
    def __init__(
        self, in_ch, out_ch,
        kernel_size, pad_mode='constant',
        bias='auto', norm=False, act=False,
        **kwargs
    ):
        super().__init__()
        seq = []
        if kernel_size >= 2:
            # Pad explicitly so any (odd) kernel size preserves the spatial
            # size while still allowing a configurable padding mode.
            seq.append(nn.Pad2D(kernel_size // 2, mode=pad_mode))
        seq.append(
            nn.Conv2D(
                in_ch, out_ch, kernel_size,
                stride=1, padding=0,
                # With bias='auto', the conv bias is omitted when a norm layer
                # follows, since the normalization would cancel it anyway.
                bias_attr=(False if norm else None) if bias == 'auto' else bias,
                **kwargs
            )
        )
        if norm:
            if norm is True:
                norm = make_norm(out_ch)
            seq.append(norm)
        if act:
            if act is True:
                act = make_act()
            seq.append(act)
        self.seq = nn.Sequential(*seq)

    def forward(self, x):
        return self.seq(x)
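
A hedged sanity-check sketch of BasicConv composing pad -> conv -> norm -> act (shapes are illustrative only; assumes PaddlePaddle is available):

    import paddle

    block = BasicConv(3, 8, kernel_size=5, norm=True, act=True)
    x = paddle.rand([2, 3, 32, 32])
    y = block(x)        # padding of kernel_size // 2 keeps the 32x32 size
    print(y.shape)      # [2, 8, 32, 32]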


class Conv1x1(BasicConv):
    def __init__(self, in_ch, out_ch, pad_mode='constant', bias='auto', norm=False, act=False, **kwargs):
        super().__init__(in_ch, out_ch, 1, pad_mode=pad_mode, bias=bias, norm=norm, act=act, **kwargs)


class Conv3x3(BasicConv):
    def __init__(self, in_ch, out_ch, pad_mode='constant', bias='auto', norm=False, act=False, **kwargs):
        super().__init__(in_ch, out_ch, 3, pad_mode=pad_mode, bias=bias, norm=norm, act=act, **kwargs)


class Conv7x7(BasicConv):
    def __init__(self, in_ch, out_ch, pad_mode='constant', bias='auto', norm=False, act=False, **kwargs):
        super().__init__(in_ch, out_ch, 7, pad_mode=pad_mode, bias=bias, norm=norm, act=act, **kwargs)


class MaxPool2x2(nn.MaxPool2D):
    def __init__(self, **kwargs):
        super().__init__(kernel_size=2, stride=(2, 2), padding=(0, 0), **kwargs)


class MaxUnPool2x2(nn.MaxUnPool2D):
    def __init__(self, **kwargs):
        super().__init__(kernel_size=2, stride=(2, 2), padding=(0, 0), **kwargs)


class ConvTransposed3x3(nn.Layer):
    def __init__(
        self, in_ch, out_ch,
        bias='auto', norm=False, act=False,
        **kwargs
    ):
        super().__init__()
        seq = []
        seq.append(
            nn.Conv2DTranspose(
                in_ch, out_ch, 3,
                stride=2, padding=1,
                bias_attr=(False if norm else None) if bias == 'auto' else bias,
                **kwargs
            )
        )
        if norm:
            if norm is True:
                norm = make_norm(out_ch)
            seq.append(norm)
        if act:
            if act is True:
                act = make_act()
            seq.append(act)
        self.seq = nn.Sequential(*seq)

    def forward(self, x):
        return self.seq(x)


class Identity(nn.Layer):
    """A placeholder identity operator that accepts exactly one argument."""

    def __init__(self, *args, **kwargs):
        super().__init__()

    def forward(self, x):
        return x
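
The decoder of the model below relies on ConvTransposed3x3 doubling the spatial size: with stride=2, padding=1, kernel 3, and output_padding=1, the output size is (H - 1) * 2 - 2 + 3 + 1 = 2H. A small hedged sketch (shapes illustrative):

    import paddle

    up = ConvTransposed3x3(16, 16, output_padding=1)
    x = paddle.rand([1, 16, 20, 20])
    print(up(x).shape)    # [1, 16, 40, 40]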
@@ -0,0 +1,82 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.nn as nn
import paddle.nn.functional as F


def normal_init(param, *args, **kwargs):
    """
    Initialize parameters with a normal distribution.

    Args:
        param (Tensor): The tensor that needs to be initialized.

    Returns:
        The initialized parameters.
    """
    return nn.initializer.Normal(*args, **kwargs)(param)


def kaiming_normal_init(param, *args, **kwargs):
    """
    Initialize parameters with the Kaiming normal distribution.

    For more information about the Kaiming initialization method, please refer to
    https://arxiv.org/abs/1502.01852.

    Args:
        param (Tensor): The tensor that needs to be initialized.

    Returns:
        The initialized parameters.
    """
    return nn.initializer.KaimingNormal(*args, **kwargs)(param)


def constant_init(param, *args, **kwargs):
    """
    Initialize parameters with constants.

    Args:
        param (Tensor): The tensor that needs to be initialized.

    Returns:
        The initialized parameters.
    """
    return nn.initializer.Constant(*args, **kwargs)(param)


class KaimingInitMixin:
    """
    A mix-in that provides the Kaiming initialization functionality.

    Examples:

        from paddlers.models.cd.models.param_init import KaimingInitMixin

        class CustomNet(nn.Layer, KaimingInitMixin):
            def __init__(self, num_channels, num_classes):
                super().__init__()
                self.conv = nn.Conv2D(num_channels, num_classes, 3, 1, 0, bias_attr=False)
                self.bn = nn.BatchNorm2D(num_classes)
                self.init_weight()
    """

    def init_weight(self):
        for layer in self.sublayers():
            if isinstance(layer, nn.Conv2D):
                kaiming_normal_init(layer.weight)
            elif isinstance(layer, (nn.BatchNorm, nn.SyncBatchNorm)):
                constant_init(layer.weight, value=1)
                constant_init(layer.bias, value=0)
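
These helpers can also be called directly on a layer's parameters; a brief hedged sketch (values and shapes illustrative):

    import paddle.nn as nn

    conv = nn.Conv2D(3, 8, 3)
    kaiming_normal_init(conv.weight)      # He-normal weights, suited to ReLU nets
    constant_init(conv.bias, value=0.0)   # zero the bias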
@@ -0,0 +1,201 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from .layers import Conv3x3, MaxPool2x2, ConvTransposed3x3, Identity
from .param_init import normal_init, constant_init


class UNetEarlyFusion(nn.Layer):
    """
    The FC-EF implementation based on PaddlePaddle.

    The original article refers to
    Caye Daudt, R., et al. "Fully convolutional siamese networks for change detection"
    (https://arxiv.org/abs/1810.08462).

    Args:
        in_channels (int): The number of bands of the input images.
        num_classes (int): The number of target classes.
        use_dropout (bool, optional): A bool value that indicates whether to use
            dropout layers. When the model is trained on a relatively small dataset,
            the dropout layers help prevent overfitting. Default: False.
    """

    def __init__(
        self,
        in_channels,
        num_classes,
        use_dropout=False
    ):
        super().__init__()

        C1, C2, C3, C4, C5 = 16, 32, 64, 128, 256

        self.use_dropout = use_dropout

        self.conv11 = Conv3x3(in_channels, C1, norm=True, act=True)
        self.do11 = self._make_dropout()
        self.conv12 = Conv3x3(C1, C1, norm=True, act=True)
        self.do12 = self._make_dropout()
        self.pool1 = MaxPool2x2()

        self.conv21 = Conv3x3(C1, C2, norm=True, act=True)
        self.do21 = self._make_dropout()
        self.conv22 = Conv3x3(C2, C2, norm=True, act=True)
        self.do22 = self._make_dropout()
        self.pool2 = MaxPool2x2()

        self.conv31 = Conv3x3(C2, C3, norm=True, act=True)
        self.do31 = self._make_dropout()
        self.conv32 = Conv3x3(C3, C3, norm=True, act=True)
        self.do32 = self._make_dropout()
        self.conv33 = Conv3x3(C3, C3, norm=True, act=True)
        self.do33 = self._make_dropout()
        self.pool3 = MaxPool2x2()

        self.conv41 = Conv3x3(C3, C4, norm=True, act=True)
        self.do41 = self._make_dropout()
        self.conv42 = Conv3x3(C4, C4, norm=True, act=True)
        self.do42 = self._make_dropout()
        self.conv43 = Conv3x3(C4, C4, norm=True, act=True)
        self.do43 = self._make_dropout()
        self.pool4 = MaxPool2x2()

        self.upconv4 = ConvTransposed3x3(C4, C4, output_padding=1)

        self.conv43d = Conv3x3(C5, C4, norm=True, act=True)
        self.do43d = self._make_dropout()
        self.conv42d = Conv3x3(C4, C4, norm=True, act=True)
        self.do42d = self._make_dropout()
        self.conv41d = Conv3x3(C4, C3, norm=True, act=True)
        self.do41d = self._make_dropout()

        self.upconv3 = ConvTransposed3x3(C3, C3, output_padding=1)

        self.conv33d = Conv3x3(C4, C3, norm=True, act=True)
        self.do33d = self._make_dropout()
        self.conv32d = Conv3x3(C3, C3, norm=True, act=True)
        self.do32d = self._make_dropout()
        self.conv31d = Conv3x3(C3, C2, norm=True, act=True)
        self.do31d = self._make_dropout()

        self.upconv2 = ConvTransposed3x3(C2, C2, output_padding=1)

        self.conv22d = Conv3x3(C3, C2, norm=True, act=True)
        self.do22d = self._make_dropout()
        self.conv21d = Conv3x3(C2, C1, norm=True, act=True)
        self.do21d = self._make_dropout()

        self.upconv1 = ConvTransposed3x3(C1, C1, output_padding=1)

        self.conv12d = Conv3x3(C2, C1, norm=True, act=True)
        self.do12d = self._make_dropout()
        self.conv11d = Conv3x3(C1, num_classes)

        self.init_weight()

    def forward(self, t1, t2):
        # Early fusion: concatenate the bi-temporal images along the channel axis.
        x = paddle.concat([t1, t2], axis=1)

        # Stage 1
        x11 = self.do11(self.conv11(x))
        x12 = self.do12(self.conv12(x11))
        x1p = self.pool1(x12)

        # Stage 2
        x21 = self.do21(self.conv21(x1p))
        x22 = self.do22(self.conv22(x21))
        x2p = self.pool2(x22)

        # Stage 3
        x31 = self.do31(self.conv31(x2p))
        x32 = self.do32(self.conv32(x31))
        x33 = self.do33(self.conv33(x32))
        x3p = self.pool3(x33)

        # Stage 4
        x41 = self.do41(self.conv41(x3p))
        x42 = self.do42(self.conv42(x41))
        x43 = self.do43(self.conv43(x42))
        x4p = self.pool4(x43)

        # Stage 4d
        x4d = self.upconv4(x4p)
        # Pad the upsampled map to the encoder map's size before the skip
        # connection, in case the input size was not evenly divisible.
        pad4 = (
            0,
            paddle.shape(x43)[3] - paddle.shape(x4d)[3],
            0,
            paddle.shape(x43)[2] - paddle.shape(x4d)[2]
        )
        x4d = paddle.concat([F.pad(x4d, pad=pad4, mode='replicate'), x43], 1)
        x43d = self.do43d(self.conv43d(x4d))
        x42d = self.do42d(self.conv42d(x43d))
        x41d = self.do41d(self.conv41d(x42d))

        # Stage 3d
        x3d = self.upconv3(x41d)
        pad3 = (
            0,
            paddle.shape(x33)[3] - paddle.shape(x3d)[3],
            0,
            paddle.shape(x33)[2] - paddle.shape(x3d)[2]
        )
        x3d = paddle.concat([F.pad(x3d, pad=pad3, mode='replicate'), x33], 1)
        x33d = self.do33d(self.conv33d(x3d))
        x32d = self.do32d(self.conv32d(x33d))
        x31d = self.do31d(self.conv31d(x32d))

        # Stage 2d
        x2d = self.upconv2(x31d)
        pad2 = (
            0,
            paddle.shape(x22)[3] - paddle.shape(x2d)[3],
            0,
            paddle.shape(x22)[2] - paddle.shape(x2d)[2]
        )
        x2d = paddle.concat([F.pad(x2d, pad=pad2, mode='replicate'), x22], 1)
        x22d = self.do22d(self.conv22d(x2d))
        x21d = self.do21d(self.conv21d(x22d))

        # Stage 1d
        x1d = self.upconv1(x21d)
        pad1 = (
            0,
            paddle.shape(x12)[3] - paddle.shape(x1d)[3],
            0,
            paddle.shape(x12)[2] - paddle.shape(x1d)[2]
        )
        x1d = paddle.concat([F.pad(x1d, pad=pad1, mode='replicate'), x12], 1)
        x12d = self.do12d(self.conv12d(x1d))
        x11d = self.conv11d(x12d)

        # Return the logits as a one-element tuple.
        return x11d,

    def init_weight(self):
        for sublayer in self.sublayers():
            if isinstance(sublayer, nn.Conv2D):
                normal_init(sublayer.weight, std=0.001)
            elif isinstance(sublayer, (nn.BatchNorm, nn.SyncBatchNorm)):
                constant_init(sublayer.weight, value=1.0)
                constant_init(sublayer.bias, value=0.0)

    def _make_dropout(self):
        if self.use_dropout:
            return nn.Dropout2D(p=0.2)
        else:
            return Identity()
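
A hedged smoke-test sketch for UNetEarlyFusion. Note that in_channels counts the bands of both temporal images combined, since forward() concatenates t1 and t2 along the channel axis (shapes illustrative; assumes PaddlePaddle is installed):

    import paddle

    model = UNetEarlyFusion(in_channels=6, num_classes=2)  # two 3-band images
    model.eval()                        # deterministic BN/dropout behavior
    t1 = paddle.rand([1, 3, 256, 256])  # image at time 1
    t2 = paddle.rand([1, 3, 256, 256])  # image at time 2
    logits, = model(t1, t2)             # unpack the one-element tuple
    print(logits.shape)                 # [1, 2, 256, 256]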