ValueError:模型的输入张量必须来自`keras.layers.input`缺少元数据 - 尝试在keras tensorflow中添加lambda层以重新连接
我有一个对我有用的重新连接模型。我试图在其前面添加几层lambda层。它在抱怨以下错误:
“ ValueError:模型的输入张量必须来自keras.layers.input
。接收:tensor:tensor(“ shape:0”,shape =(?,),dtype = int32)(缺少上一层元数据)。
而且我不确定这意味着什么或如何解决。
我在lambda层的以下两个块中添加了代码,这引起了问题。我知道我可以在重新连接之前做一个Numpy FFT。以前已经这样做了,但是正在努力将FFT层作为带有固定系数的重新系统的前层。如果还有其他方法可以实现这一目标,例如创建另一个具有FFT lambda层并将其与Resnet进行顺序的网络,以便可以通过两个网络将错误反向传播,这是可以的。
主要目标是将FFT功能放在重新系统层的前面,并能够以最简单的/最佳方式将重新网络分类器的误差倒入FFT层之前。
def resnode(x_in,filter_count,kernel_size=3,downsample=False):
x = x_in
x = keras_contrib.layers.InstanceNormalization()(x)
x = keras.layers.Lambda(lambda v: tf.spectral.fft(tf.cast(v,tf.complex64)))(x)
x = keras.layers.Lambda(lambda v: tf.abs(tf.cast(v,tf.complex64)))(x)
x = keras.layers.ReLU()(x)
x = keras.layers.Conv1D(filter_count,kernel_size,padding='same')(x)
x = keras.layers.Dropout(0.1)(x)
x = keras_contrib.layers.InstanceNormalization()(x)
x = keras.layers.ReLU()(x)
x = keras.layers.Conv1D(filter_count,kernel_size,padding='same')(x)
if downsample:
x_in = keras.layers.AveragePooling1D()(x_in)
x = keras.layers.AveragePooling1D()(x)
if x_in.shape[-1] != x.shape[-1]:
print('convolving x_in',x_in.shape)
x_in = keras.layers.Conv1D(filter_count,1,padding='same')(x_in)
print('Newshape x_in',x_in.shape)
x = keras.layers.Add()([x_in,x])
return x
def createresnet(outputs=4):
n_features = N
x_input = keras.layers.Input(shape=(None,1))
x_input = tf.shape(tf.squeeze(x_input))
x = keras.layers.Lambda(lambda v: tf.spectral.fft(tf.cast(v,tf.complex64)))(x_input) #
x = keras.layers.Lambda(lambda v: tf.abs(tf.cast(v,tf.complex64)))(x)
x = tf.cast(x, tf.float32)
x = tf.expand_dims(x, axis=0)
x = tf.expand_dims(x, axis=2)
filter_size = 32 #
x = resnode(x,filter_size,kernel_size=7) # FFT layer
filter_size = 32
x = resnode(x,filter_size,kernel_size=5,downsample=True)
filter_size = 64
x = resnode(x,filter_size,kernel_size=5,downsample=True)
x = resnode(x,filter_size,kernel_size=5,downsample=True)
filter_size = 128
x = resnode(x,filter_size,downsample=True)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size,downsample=True)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size,downsample=True)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size,downsample=True)
x = resnode(x,filter_size,downsample=True)
filter_size = 64
x = resnode(x,filter_size,downsample=True)
if False:
filter_size = 64
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = 64
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = outputs
x = resnode(x,filter_size,downsample=True)
x = keras.layers.GlobalAveragePooling1D()(x)
x = keras.layers.Activation('softmax')(x)
model = keras.Model(inputs=x_input,outputs=x)
print(model.layers[0].output)
print(model.layers[1].output)
return model
感谢任何可以提供帮助的人!
I have a ResNet model that works for me. I am trying to add a couple of lambda layers to the front of it. It is complaining with the following error:
"ValueError: Input tensors to a Model must come from keras.layers.Input
. Received: Tensor("Shape:0", shape=(?,), dtype=int32) (missing previous layer metadata)."
and I am not sure what this means or how to fix it.
I added code to the following two blocks for the lambda layers, which caused the problem. I know that I can do a numpy FFT prior to the ResNet. Have done this previously, but am working toward having the FFT layer as the front layer of the ResNet with fixed coefficients. If there is some other way of accomplishing this, like creating another network that has the FFT lambda layer and making it sequential with the ResNet, such that error can be backpropagated through both networks, that would be ok.
The main goal is to have the FFT functionality in front of the ResNet layers and be able to backpropagate the error of the ResNet classifier to before the FFT layer in the easiest/best way.
def resnode(x_in,filter_count,kernel_size=3,downsample=False):
x = x_in
x = keras_contrib.layers.InstanceNormalization()(x)
x = keras.layers.Lambda(lambda v: tf.spectral.fft(tf.cast(v,tf.complex64)))(x)
x = keras.layers.Lambda(lambda v: tf.abs(tf.cast(v,tf.complex64)))(x)
x = keras.layers.ReLU()(x)
x = keras.layers.Conv1D(filter_count,kernel_size,padding='same')(x)
x = keras.layers.Dropout(0.1)(x)
x = keras_contrib.layers.InstanceNormalization()(x)
x = keras.layers.ReLU()(x)
x = keras.layers.Conv1D(filter_count,kernel_size,padding='same')(x)
if downsample:
x_in = keras.layers.AveragePooling1D()(x_in)
x = keras.layers.AveragePooling1D()(x)
if x_in.shape[-1] != x.shape[-1]:
print('convolving x_in',x_in.shape)
x_in = keras.layers.Conv1D(filter_count,1,padding='same')(x_in)
print('Newshape x_in',x_in.shape)
x = keras.layers.Add()([x_in,x])
return x
def createresnet(outputs=4):
n_features = N
x_input = keras.layers.Input(shape=(None,1))
x_input = tf.shape(tf.squeeze(x_input))
x = keras.layers.Lambda(lambda v: tf.spectral.fft(tf.cast(v,tf.complex64)))(x_input) #
x = keras.layers.Lambda(lambda v: tf.abs(tf.cast(v,tf.complex64)))(x)
x = tf.cast(x, tf.float32)
x = tf.expand_dims(x, axis=0)
x = tf.expand_dims(x, axis=2)
filter_size = 32 #
x = resnode(x,filter_size,kernel_size=7) # FFT layer
filter_size = 32
x = resnode(x,filter_size,kernel_size=5,downsample=True)
filter_size = 64
x = resnode(x,filter_size,kernel_size=5,downsample=True)
x = resnode(x,filter_size,kernel_size=5,downsample=True)
filter_size = 128
x = resnode(x,filter_size,downsample=True)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size,downsample=True)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size,downsample=True)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size,downsample=True)
x = resnode(x,filter_size,downsample=True)
filter_size = 64
x = resnode(x,filter_size,downsample=True)
if False:
filter_size = 64
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = 64
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = 128
x = resnode(x,filter_size)
x = resnode(x,filter_size,downsample=True)
filter_size = outputs
x = resnode(x,filter_size,downsample=True)
x = keras.layers.GlobalAveragePooling1D()(x)
x = keras.layers.Activation('softmax')(x)
model = keras.Model(inputs=x_input,outputs=x)
print(model.layers[0].output)
print(model.layers[1].output)
return model
Thanks to anyone who can help!
如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。

绑定邮箱获取回复消息
由于您还没有绑定你的真实邮箱,如果其他用户或者作者回复了您的评论,将不能在第一时间通知您!
发布评论
评论(1)
正如评论中的@frightera所说,Squeeze()删除了大小1的所有维度。您可能正在寻找Flatten(),这将使您的频道减少到适用1D傅立叶变换的FFT()中的1D张量。
As a @Frightera in the comments said, squeeze() removes all dimensions of size 1. You might be looking for flatten() which would reduce your channels into a 1D tensor for the fft() which applies 1D Fourier transform.