TypeError when saving a TensorFlow Keras model as an H5 file

Posted 2025-02-04 06:13:04


I am trying to build an autoencoder in order to get a vector representation of images (x: 640, y: 480). However, when I try to call encoder.save("encoder.h5"), I get the following error:

TypeError: ('Not JSON Serializable:', <tf.Variable 'batch_normalization/gamma:0' shape=(32,) dtype=float32, numpy=
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
       1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
      dtype=float32)>)

I'm fairly sure the model itself works: the fit function runs, and after fitting I can call encoder.predict() and get out the encoded vectors (length 60).
My code is:

import os
import tensorflow as tf
from tensorflow.python.keras.layers import Input, UpSampling2D, Add, Conv2D, MaxPooling2D, LeakyReLU
import cv2
from tensorflow.python.keras import layers, losses
from tensorflow.python.keras.models import Model
import h5py

def get_encoder(shape=(640, 480, 3)):
    def res_block(x, n_features):
        _x = x
        x = tf.keras.layers.BatchNormalization()(x)
        x = LeakyReLU()(x)

        x = Conv2D(n_features, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
        x = Add()([_x, x])
        return x

    inp = Input(shape=shape)

    # 640 x 480
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(inp)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)

    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    # 320 x 240
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    for _ in range(2):
        x = res_block(x, 32)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    # 160 x 120
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    for _ in range(2):
        x = res_block(x, 32)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    # 80 x 60
    x = Conv2D(1, kernel_size=(1, 1), strides=(1, 1), padding='same')(x)
    return Model(inp, x)


def get_decoder(shape=(240, 180, 3)):
    inp = Input(shape=shape)

    # 60 x 80
    x = UpSampling2D((2, 2))(inp)
    x = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)

    # 120 x 160
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)

    # 240 x 320
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = LeakyReLU()(x)

    # 480 x 640
    x = Conv2D(1, kernel_size=(1, 1), strides=(1, 1), padding='same')(x)
    return Model(inp, x)


encoder = get_encoder((480, 640, 3))
decoder = get_decoder((60, 80, 1))
inp = Input((480, 640, 3))
e = encoder(inp)
d = decoder(e)
autoencoder = Model(inp, d)

autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())

batch_size = 8
SHAPE = (480, 640)
IMAGES = "pathToImages"
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
train_gen = image_generator.flow_from_directory(
    os.path.join(IMAGES, "train"),
    class_mode="input", target_size=SHAPE, batch_size=batch_size,
)
val_gen = image_generator.flow_from_directory(
    os.path.join(IMAGES, "test"),
    class_mode="input", target_size=SHAPE, batch_size=batch_size,
)

for i in range(10):
    autoencoder.fit(train_gen, validation_data=val_gen, epochs=1, steps_per_epoch=10, validation_steps=5, batch_size=batch_size)
    encoder.save('encoder.h5')
    decoder.save('decoder.h5')

I based it on this notebook: https://www.kaggle.com/code/miklgr500/image2vec-autoencoder/notebook

Any ideas on how I can fix it, or alternative methods for saving the model weights?

Edit: using encoder.save_weights('encoder.h5') works.
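
For reference, a minimal sketch of the weights-only round trip (assuming the get_encoder definition from the code above is in scope when reloading; the file name encoder_weights.h5 is just an example):

# Weights-only saving skips the JSON serialization of the model config,
# which is the step that fails in encoder.save('encoder.h5').
encoder.save_weights('encoder_weights.h5')

# Later, e.g. in a fresh session: rebuild the identical architecture,
# then load the trained weights into it.
restored_encoder = get_encoder((480, 640, 3))
restored_encoder.load_weights('encoder_weights.h5')

The same thing can be done during training with a tf.keras.callbacks.ModelCheckpoint(..., save_weights_only=True) callback instead of saving manually each loop iteration, though that checkpoints the model passed to fit (here the full autoencoder) rather than the encoder sub-model.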


Comments (1)

泪痕残 2025-02-11 06:13:04


OP says this about Sadra's answer:

using encoder.save_weights('encoder.h5') works
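
A likely explanation for the error itself: the question's code mixes layers imported from the private tensorflow.python.keras namespace with tf.keras.layers.BatchNormalization, and combining the two Keras implementations in one model is a known trigger for the "Not JSON Serializable: <tf.Variable ...>" failure in model.save(). A minimal sketch of the fix (assuming a TF 2.x install) is to import every layer from the public namespace and drop the tf.keras.layers.* calls:

from tensorflow.keras.layers import (Input, UpSampling2D, Add, Conv2D,
                                     MaxPooling2D, LeakyReLU, BatchNormalization)
from tensorflow.keras.models import Model

# Inside get_encoder()/get_decoder(), call BatchNormalization() from this
# import instead of tf.keras.layers.BatchNormalization(), so that every
# layer comes from the same Keras implementation; encoder.save('encoder.h5')
# should then serialize the model config without the TypeError.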
