
Commit 3fb86ce

update and fix SequentialLayer bug

Renames the SequentialLayer container to Sequential, changes the LeakyReLU alpha argument to negative_slope, and removes the SRGAN result .pptx files from img/.

1 parent 041e8da · commit 3fb86ce

File tree

5 files changed: +14 −14 lines changed


img/SRGAN Result.pptx

-26.4 MB
Binary file not shown.

img/SRGAN Result2.pptx

-13.1 MB
Binary file not shown.

img/SRGAN Result3.pptx

-671 KB
Binary file not shown.

srgan.py

Lines changed: 12 additions & 12 deletions
@@ -1,6 +1,6 @@
 from tensorlayerx.nn import Module
 import tensorlayerx as tlx
-from tensorlayerx.nn import Conv2d, BatchNorm2d,Elementwise,SubpixelConv2d, SequentialLayer, UpSampling2d, Flatten
+from tensorlayerx.nn import Conv2d, BatchNorm2d,Elementwise,SubpixelConv2d, UpSampling2d, Flatten, Sequential
 from tensorlayerx.nn import Linear, MaxPool2d
 
 W_init = tlx.initializers.TruncatedNormal(stddev=0.02)
@@ -43,7 +43,7 @@ def make_layer(self):
         layer_list = []
         for i in range(16):
             layer_list.append(ResidualBlock())
-        return SequentialLayer(layer_list)
+        return Sequential(layer_list)
 
     def forward(self, x):
         x = self.conv1(x)
@@ -91,7 +91,7 @@ def make_layer(self):
         layer_list = []
         for i in range(16):
             layer_list.append(ResidualBlock())
-        return SequentialLayer(layer_list)
+        return Sequential(layer_list)
 
     def forward(self, x):
         x = self.conv1(x)
@@ -115,23 +115,23 @@ class SRGAN_d2(Module):
     """
     def __init__(self, ):
         super(SRGAN_d2, self).__init__()
-        self.conv1 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init)
-        self.conv2 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
+        self.conv1 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init)
+        self.conv2 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
         self.bn1 = BatchNorm2d( gamma_init=G_init)
-        self.conv3 = Conv2d(out_channels=128, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
+        self.conv3 = Conv2d(out_channels=128, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
         self.bn2 = BatchNorm2d( gamma_init=G_init)
-        self.conv4 = Conv2d(out_channels=128, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
+        self.conv4 = Conv2d(out_channels=128, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
         self.bn3 = BatchNorm2d(gamma_init=G_init)
-        self.conv5 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
+        self.conv5 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
         self.bn4 = BatchNorm2d( gamma_init=G_init)
-        self.conv6 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
+        self.conv6 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
         self.bn5 = BatchNorm2d( gamma_init=G_init)
-        self.conv7 = Conv2d(out_channels=512, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
+        self.conv7 = Conv2d(out_channels=512, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
         self.bn6 = BatchNorm2d( gamma_init=G_init)
-        self.conv8 = Conv2d(out_channels=512, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
+        self.conv8 = Conv2d(out_channels=512, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
         self.bn7 = BatchNorm2d( gamma_init=G_init)
         self.flat = Flatten()
-        self.dense1 = Linear(out_features=1024, act=tlx.LeakyReLU(alpha=0.2))
+        self.dense1 = Linear(out_features=1024, act=tlx.LeakyReLU(negative_slope=0.2))
         self.dense2 = Linear(out_features=1)
 
     def forward(self, x):
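The srgan.py change is purely an API migration: the tensorlayerx version this commit targets exposes the container as Sequential rather than SequentialLayer, and its LeakyReLU takes negative_slope rather than alpha; the call signatures are otherwise unchanged. A minimal before/after sketch (the two-layer stack is illustrative, not the project's actual network):

import tensorlayerx as tlx
from tensorlayerx.nn import Linear, Sequential  # was: SequentialLayer

# Build the layer list exactly as before; only the container class is renamed.
layer_list = [
    Linear(out_features=1024, act=tlx.LeakyReLU(negative_slope=0.2)),  # was: alpha=0.2
    Linear(out_features=1),
]
model = Sequential(layer_list)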

vgg.py

Lines changed: 2 additions & 2 deletions
@@ -34,7 +34,7 @@
 import tensorlayerx as tlx
 from tensorlayerx import logging
 from tensorlayerx.files import assign_weights, maybe_download_and_extract
-from tensorlayerx.nn import (BatchNorm, Conv2d, Linear, Flatten, Input, SequentialLayer, MaxPool2d)
+from tensorlayerx.nn import (BatchNorm, Conv2d, Linear, Flatten, Input, Sequential, MaxPool2d)
 from tensorlayerx.nn import Module
 
 __all__ = [
@@ -150,7 +150,7 @@ def make_layers(config, batch_norm=False, end_with='outputs'):
            is_end = True
        if is_end:
            break
-    return SequentialLayer(layer_list)
+    return Sequential(layer_list)
 
 def restore_model(model, layer_type):
     logging.info("Restore pre-trained weights")
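vgg.py needs the same one-line migration where make_layers wraps its accumulated layers. A hedged sketch of the pattern (the layers shown are placeholders, not the file's actual VGG configuration):

from tensorlayerx.nn import Conv2d, Flatten, MaxPool2d, Sequential

def make_layers_sketch():
    # Accumulate layers in a plain list, then wrap them in the renamed container.
    layer_list = [
        Conv2d(out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME'),
        MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding='SAME'),
        Flatten(),
    ]
    return Sequential(layer_list)  # was: SequentialLayer(layer_list)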
