11from tensorlayerx .nn import Module
22import tensorlayerx as tlx
3- from tensorlayerx .nn import Conv2d , BatchNorm2d ,Elementwise ,SubpixelConv2d , SequentialLayer , UpSampling2d , Flatten
3+ from tensorlayerx .nn import Conv2d , BatchNorm2d ,Elementwise ,SubpixelConv2d , UpSampling2d , Flatten , Sequential
44from tensorlayerx .nn import Linear , MaxPool2d
55
66W_init = tlx .initializers .TruncatedNormal (stddev = 0.02 )
def make_layer(self):
    """Build the 16-block residual trunk as one ``Sequential`` module.

    Returns
    -------
    Sequential
        A chain of 16 freshly constructed ``ResidualBlock`` instances.
    """
    blocks = [ResidualBlock() for _ in range(16)]
    return Sequential(blocks)
4747
4848 def forward (self , x ):
4949 x = self .conv1 (x )
def make_layer(self):
    """Return a ``Sequential`` container stacking 16 ``ResidualBlock``s."""
    return Sequential([ResidualBlock() for _ in range(16)])
9595
9696 def forward (self , x ):
9797 x = self .conv1 (x )
@@ -115,23 +115,23 @@ class SRGAN_d2(Module):
115115 """
def __init__(self, ):
    """Construct the network's layers.

    Eight 3x3 conv layers with channel width doubling every two layers
    (64 -> 128 -> 256 -> 512); the stride-(2, 2) layers downsample.
    BatchNorm follows every conv except the first; the head flattens and
    applies two dense layers ending in a single-unit output.
    """
    super(SRGAN_d2, self).__init__()

    def _lrelu():
        # Fresh activation object per layer, mirroring the original's
        # one-instance-per-conv construction.
        return tlx.LeakyReLU(negative_slope=0.2)

    # Convolution trunk. conv1 keeps its bias; every BN-followed conv
    # drops it (b_init=None) since BatchNorm supplies the shift.
    self.conv1 = Conv2d(out_channels=64, kernel_size=(3, 3), stride=(1, 1), act=_lrelu(), padding='SAME', W_init=W_init)
    self.conv2 = Conv2d(out_channels=64, kernel_size=(3, 3), stride=(2, 2), act=_lrelu(), padding='SAME', W_init=W_init, b_init=None)
    self.bn1 = BatchNorm2d(gamma_init=G_init)
    self.conv3 = Conv2d(out_channels=128, kernel_size=(3, 3), stride=(1, 1), act=_lrelu(), padding='SAME', W_init=W_init, b_init=None)
    self.bn2 = BatchNorm2d(gamma_init=G_init)
    self.conv4 = Conv2d(out_channels=128, kernel_size=(3, 3), stride=(2, 2), act=_lrelu(), padding='SAME', W_init=W_init, b_init=None)
    self.bn3 = BatchNorm2d(gamma_init=G_init)
    self.conv5 = Conv2d(out_channels=256, kernel_size=(3, 3), stride=(1, 1), act=_lrelu(), padding='SAME', W_init=W_init, b_init=None)
    self.bn4 = BatchNorm2d(gamma_init=G_init)
    self.conv6 = Conv2d(out_channels=256, kernel_size=(3, 3), stride=(2, 2), act=_lrelu(), padding='SAME', W_init=W_init, b_init=None)
    self.bn5 = BatchNorm2d(gamma_init=G_init)
    self.conv7 = Conv2d(out_channels=512, kernel_size=(3, 3), stride=(1, 1), act=_lrelu(), padding='SAME', W_init=W_init, b_init=None)
    self.bn6 = BatchNorm2d(gamma_init=G_init)
    self.conv8 = Conv2d(out_channels=512, kernel_size=(3, 3), stride=(2, 2), act=_lrelu(), padding='SAME', W_init=W_init, b_init=None)
    self.bn7 = BatchNorm2d(gamma_init=G_init)

    # Head: flatten -> 1024-unit dense (LeakyReLU) -> 1-unit dense output.
    self.flat = Flatten()
    self.dense1 = Linear(out_features=1024, act=_lrelu())
    self.dense2 = Linear(out_features=1)
136136
137137 def forward (self , x ):
# NOTE(review): "0 commit comments" was a GitHub page footer captured by the scrape, not source code.