Defining the Classification Network
The VGG16 Network
In the figure above, each column corresponds to one structural configuration. Focus on configuration D, which consists of:
- 13 convolutional layers (Convolutional Layer)
- 3 fully connected layers (Fully Connected Layer)
- 5 pooling layers (Pooling Layer), each denoted maxpool
The convolutional and fully connected layers carry weight parameters, so they are also called weight layers. Their total is 13 + 3 = 16, which is where the 16 in VGG16 comes from. (Pooling layers have no weights, so they are not weight layers and are not counted.)
Here we simplify the fully connected part: after global pooling, the features go straight to a softmax output over the 12 classes.
- "Global pooling" means the pooling window is as large as the entire feature map, so each W×H×C feature map input is reduced to a 1×1×C output.
- Replacing the fully connected layers with global pooling dramatically cuts the number of trainable parameters, as the quick count below shows.
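To make that saving concrete, here is a back-of-the-envelope count comparing the classic VGG16 head with the one used here (a sketch; the 7×7×512 feature map assumes the standard 224×224 VGG16 input, and biases are ignored):

# Classic VGG16 head: three fully connected layers on a 7x7x512 feature map
fc_params = 7 * 7 * 512 * 4096 + 4096 * 4096 + 4096 * 1000
# Head used here: global pooling (parameter-free) + one 512 -> 12 linear layer
gp_params = 512 * 12
print(fc_params)  # 123633664, roughly 123.6 million weights
print(gp_params)  # 6144 weights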
import paddle.fluid as fluid

class VGG16net(fluid.dygraph.Layer):
    def __init__(self):
        super(VGG16net, self).__init__()
        # A loop could build these layers; they are written out one by one so the
        # network is easier to adjust and more transparent to read
        self.block1_conv1_3_64 = fluid.dygraph.Conv2D(num_channels=3, num_filters=64, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block1_conv2_3_64 = fluid.dygraph.Conv2D(num_channels=64, num_filters=64, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block1_maxpool1 = fluid.dygraph.Pool2D(pool_size=(2, 2), pool_stride=2, pool_padding=0, pool_type='max')
        self.block2_conv1_3_128 = fluid.dygraph.Conv2D(num_channels=64, num_filters=128, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block2_conv2_3_128 = fluid.dygraph.Conv2D(num_channels=128, num_filters=128, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block2_maxpool1 = fluid.dygraph.Pool2D(pool_size=(2, 2), pool_stride=2, pool_padding=0, pool_type='max')
        self.block3_conv1_3_256 = fluid.dygraph.Conv2D(num_channels=128, num_filters=256, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block3_conv2_3_256 = fluid.dygraph.Conv2D(num_channels=256, num_filters=256, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block3_conv3_3_256 = fluid.dygraph.Conv2D(num_channels=256, num_filters=256, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block3_maxpool1 = fluid.dygraph.Pool2D(pool_size=(2, 2), pool_stride=2, pool_padding=0, pool_type='max')
        self.block4_conv1_3_512 = fluid.dygraph.Conv2D(num_channels=256, num_filters=512, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block4_conv2_3_512 = fluid.dygraph.Conv2D(num_channels=512, num_filters=512, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block4_conv3_3_512 = fluid.dygraph.Conv2D(num_channels=512, num_filters=512, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block4_maxpool1 = fluid.dygraph.Pool2D(pool_size=(2, 2), pool_stride=2, pool_padding=0, pool_type='max')
        self.block5_conv1_3_512 = fluid.dygraph.Conv2D(num_channels=512, num_filters=512, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block5_conv2_3_512 = fluid.dygraph.Conv2D(num_channels=512, num_filters=512, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block5_conv3_3_512 = fluid.dygraph.Conv2D(num_channels=512, num_filters=512, filter_size=(3, 3), stride=1, padding=1, act='relu')
        self.block5_maxpool1 = fluid.dygraph.Pool2D(global_pooling=True, pool_type='max')  # global pooling layer
        self.fc1 = fluid.dygraph.Linear(input_dim=512, output_dim=12, act='softmax')
    def normal_forward(self, x):
        '''
        Forward pass used for training, when feature maps do not need to be shown
        '''
        x = self.block1_conv1_3_64(x)
        x = self.block1_conv2_3_64(x)
        x = self.block1_maxpool1(x)
        x = self.block2_conv1_3_128(x)
        x = self.block2_conv2_3_128(x)
        x = self.block2_maxpool1(x)
        x = self.block3_conv1_3_256(x)
        x = self.block3_conv2_3_256(x)
        x = self.block3_conv3_3_256(x)
        x = self.block3_maxpool1(x)
        x = self.block4_conv1_3_512(x)
        x = self.block4_conv2_3_512(x)
        x = self.block4_conv3_3_512(x)
        x = self.block4_maxpool1(x)
        x = self.block5_conv1_3_512(x)
        x = self.block5_conv2_3_512(x)
        x = self.block5_conv3_3_512(x)
        x = self.block5_maxpool1(x)
        x = fluid.layers.squeeze(x, axes=[])  # drop the size-1 dims (NCHW layout): (32, 512, 1, 1) --> (32, 512)
        x = self.fc1(x)
        return x
    def Reverse_forward(self, x):
        '''
        After the forward pass, convolve in reverse and display the feature maps
        '''
        x = self.block1_conv1_3_64(x)
        x = self.block1_conv2_3_64(x)
        block1_maxpool1_temp = x
        x = self.block1_maxpool1(x)
        x = self.block2_conv1_3_128(x)
        x = self.block2_conv2_3_128(x)
        block2_maxpool1_temp = x
        x = self.block2_maxpool1(x)
        x = self.block3_conv1_3_256(x)
        x = self.block3_conv2_3_256(x)
        x = self.block3_conv3_3_256(x)
        block3_maxpool1_temp = x
        x = self.block3_maxpool1(x)
        x = self.block4_conv1_3_512(x)
        x = self.block4_conv2_3_512(x)
        x = self.block4_conv3_3_512(x)
        block4_maxpool1_temp = x
        x = self.block4_maxpool1(x)
        x = self.block5_conv1_3_512(x)
        x = self.block5_conv2_3_512(x)
        x = self.block5_conv3_3_512(x)
        # Reverse convolution and unpooling; Reverse_Conv and Reverse_Pooling
        # are helper classes defined elsewhere in this project
        rblock5_conv3 = Reverse_Conv('block5_conv3', 512, self.block5_conv3_3_512, 512, 512, 3)
        rx = rblock5_conv3.reverse_conv(x)
        rblock5_conv2 = Reverse_Conv('block5_conv2', 512, self.block5_conv2_3_512, 512, 512, 3)
        rx = rblock5_conv2.reverse_conv(rx)
        rblock5_conv1 = Reverse_Conv('block5_conv1', 512, self.block5_conv1_3_512, 512, 512, 3)
        rx = rblock5_conv1.reverse_conv(rx)
        rblock4_maxpool = Reverse_Pooling(block4_maxpool1_temp, 2, 2, 2, 0)
        rx = rblock4_maxpool.backward(rx)
        rblock4_conv3 = Reverse_Conv('block4_conv3', 512, self.block4_conv3_3_512, 512, 512, 3)
        rx = rblock4_conv3.reverse_conv(rx)
        rblock4_conv2 = Reverse_Conv('block4_conv2', 512, self.block4_conv2_3_512, 512, 512, 3)
        rx = rblock4_conv2.reverse_conv(rx)
        rblock4_conv1 = Reverse_Conv('block4_conv1', 256, self.block4_conv1_3_512, 512, 256, 3)
        rx = rblock4_conv1.reverse_conv(rx)
        rblock3_maxpool = Reverse_Pooling(block3_maxpool1_temp, 2, 2, 2, 0)
        rx = rblock3_maxpool.backward(rx)
        rblock3_conv3 = Reverse_Conv('block3_conv3', 256, self.block3_conv3_3_256, 256, 256, 3)
        rx = rblock3_conv3.reverse_conv(rx)
        rblock3_conv2 = Reverse_Conv('block3_conv2', 256, self.block3_conv2_3_256, 256, 256, 3)
        rx = rblock3_conv2.reverse_conv(rx)
        rblock3_conv1 = Reverse_Conv('block3_conv1', 256, self.block3_conv1_3_256, 256, 128, 3)
        rx = rblock3_conv1.reverse_conv(rx)
        rblock2_maxpool = Reverse_Pooling(block2_maxpool1_temp, 2, 2, 2, 0)
        rx = rblock2_maxpool.backward(rx)
        rblock2_conv2 = Reverse_Conv('block2_conv2', 128, self.block2_conv2_3_128, 128, 128, 3)
        rx = rblock2_conv2.reverse_conv(rx)
        rblock2_conv1 = Reverse_Conv('block2_conv1', 128, self.block2_conv1_3_128, 128, 64, 3)
        rx = rblock2_conv1.reverse_conv(rx)
        rblock1_maxpool = Reverse_Pooling(block1_maxpool1_temp, 2, 2, 2, 0)
        rx = rblock1_maxpool.backward(rx)
        rblock1_conv2 = Reverse_Conv('block1_conv2', 64, self.block1_conv2_3_64, 64, 64, 3)
        rx = rblock1_conv2.reverse_conv(rx)
        rblock1_conv1 = Reverse_Conv('block1_conv1', 64, self.block1_conv1_3_64, 64, 3, 3)
        rx = rblock1_conv1.reverse_conv(rx)
        x = self.block5_maxpool1(x)
        x = fluid.layers.squeeze(x, axes=[])
        x = self.fc1(x)
        return x
    def forward(self, x, is_display_feature=False):
        if is_display_feature:
            x = self.Reverse_forward(x)
        else:
            x = self.normal_forward(x)
        return x
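To sanity-check the definition, you can push a random batch through the model. This is a minimal sketch, assuming a 224×224 input size and the PaddlePaddle 1.x fluid dygraph API used above; the batch size of 4 is arbitrary:

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    model = VGG16net()
    # a fake batch: 4 RGB images in NCHW layout
    fake_images = np.random.randn(4, 3, 224, 224).astype('float32')
    probs = model(fluid.dygraph.to_variable(fake_images))
    print(probs.shape)  # expected: [4, 12], one softmax distribution per image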
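The Reverse_Pooling and Reverse_Conv helpers are not listed in this section. Conceptually, Reverse_Pooling performs max-unpooling in the Zeiler & Fergus deconvnet style: it uses the saved pre-pool feature map (the *_maxpool1_temp tensors above) to locate where each maximum came from, then scatters each pooled value back to that position. The NumPy sketch below illustrates the idea only; the function name and signature are hypothetical, not the actual class:

import numpy as np

def max_unpool2d(pooled, pre_pool, size=2, stride=2):
    '''Hypothetical illustration of max-unpooling (not the actual Reverse_Pooling).

    pooled   : (N, C, H, W) output of the max pool
    pre_pool : (N, C, H*stride, W*stride) feature map saved before pooling
    '''
    out = np.zeros_like(pre_pool)
    n, c, h, w = pooled.shape
    for i in range(h):
        for j in range(w):
            # the window in the pre-pool map that produced pooled[:, :, i, j]
            rows = slice(i * stride, i * stride + size)
            cols = slice(j * stride, j * stride + size)
            window = pre_pool[:, :, rows, cols].reshape(n, c, -1)
            # one-hot mask marking the argmax position inside each window
            mask = np.zeros_like(window)
            np.put_along_axis(mask, window.argmax(axis=2)[..., None], 1.0, axis=2)
            # place the pooled value at the argmax position, zeros elsewhere
            vals = mask * pooled[:, :, i:i + 1, j:j + 1].reshape(n, c, 1)
            out[:, :, rows, cols] = vals.reshape(n, c, size, size)
    return out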