How to freeze autoencoder layers in TensorFlow
This is an autoencoder network: the first part is the encoder and the second part is the decoder. I want to freeze the first three convolution layers and save the encoder part. Can you help me with how to do this? Thank you.
import numpy as np
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, MaxPooling2D, UpSampling2D
from tensorflow.keras.models import Model

def encoder(input_img):
    # encoder
    # input = 28 x 28 x 3 (wide and thin)
    conv1 = Conv2D(64, (2, 2), activation='relu', padding='same')(input_img)  # 28 x 28 x 64
    conv2 = BatchNormalization()(conv1)
    conv3 = Conv2D(32, (2, 2), activation='relu', padding='same')(conv2)
    conv4 = BatchNormalization()(conv3)
    pool5 = MaxPooling2D(pool_size=(2, 2))(conv4)  # 14 x 14 x 32
    conv6 = Conv2D(16, (2, 2), activation='relu', padding='same')(pool5)  # 14 x 14 x 16
    conv7 = BatchNormalization()(conv6)
    conv8 = Conv2D(8, (2, 2), activation='relu', padding='same')(conv7)
    conv9 = BatchNormalization()(conv8)
    conv10 = Conv2D(4, (2, 2), activation='relu', padding='same')(conv9)  # 14 x 14 x 4 (bottleneck)
    return conv10

def decoder(conv11):
    # decoder
    conv12 = Conv2D(4, (2, 2), activation='relu', padding='same')(conv11)
    conv13 = Conv2D(8, (2, 2), activation='relu', padding='same')(conv12)  # 14 x 14 x 8
    conv14 = BatchNormalization()(conv13)
    conv15 = Conv2D(16, (2, 2), activation='relu', padding='same')(conv14)
    conv16 = BatchNormalization()(conv15)
    conv17 = Conv2D(32, (2, 2), activation='relu', padding='same')(conv16)  # 14 x 14 x 32
    conv18 = BatchNormalization()(conv17)
    conv19 = Conv2D(64, (2, 2), activation='relu', padding='same')(conv18)
    conv20 = BatchNormalization()(conv19)
    up21 = UpSampling2D((2, 2))(conv20)  # 28 x 28 x 64
    decoded = Conv2D(3, (2, 2), activation='sigmoid', padding='same')(up21)  # 28 x 28 x 3
    return decoded

# input_img was not defined in the posted snippet; a 3-channel 28 x 28 input is
# inferred from the decoder's final 3-filter sigmoid layer.
input_img = Input(shape=(28, 28, 3))
autoencoder = Model(input_img, decoder(encoder(input_img)))
autoencoder.compile(loss='mae', optimizer='SGD')
autoencoder.summary()

# normal, un_informative, and batch_size come from the asker's environment.
train = np.concatenate((normal[0:1900, :, :, :], un_informative[0:1900, :, :, :]), axis=0)
valid = np.concatenate((normal[1900:, :, :, :], un_informative[1900:, :, :, :]), axis=0)
history = autoencoder.fit(train, train, batch_size=batch_size, epochs=200, verbose=1,
                          validation_data=(valid, valid))
1 Answer
After you compile your model, you can choose which layers to freeze by specifying:
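Presumably the snippet that followed was Keras's standard per-layer flag, which only takes effect once the model is recompiled:

layer.trainable = False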
So I would suggest doing the following:
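The code that followed is missing here, so the sketch below reconstructs the suggested approach under the model definition above; selecting layers with isinstance and the file name encoder.h5 are illustrative choices, not necessarily the original answer's.

from tensorflow.keras.layers import Conv2D
from tensorflow.keras.models import Model

# Conv2D layers in graph order; the first five belong to the encoder
# (conv1, conv3, conv6, conv8, conv10).
conv_layers = [layer for layer in autoencoder.layers if isinstance(layer, Conv2D)]

# Freeze the first three convolution layers.
for layer in conv_layers[:3]:
    layer.trainable = False

# Changes to trainable only take effect after recompiling.
autoencoder.compile(loss='mae', optimizer='SGD')

# Wrap the encoder half in its own Model; it shares weights with the
# autoencoder, so after training it holds the trained encoder layers.
encoder_model = Model(autoencoder.input, conv_layers[4].output)
encoder_model.save('encoder.h5')  # illustrative file name

Note that freezing before fit keeps the frozen kernels at their initial values; to freeze only after pre-training, run fit first, then set trainable = False and recompile before any further training.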