Error defining class inputs in TensorFlow

Posted on 2025-02-11 13:56:31


I tried passing the input channels and output channels to the layer as tf.constant tensors, but the code raises an error: shape must be a vector of {int32,int64}, got shape [2,1]. I defined the GTLayer class as follows:

Importing libraries:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np

GTLayer class:

class GTLayer(keras.layers.Layer):
    
    def __init__(self, in_channels, out_channels, first=True):
        super(GTLayer, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.first = first
        if self.first == True:
            self.conv1 = GTConv(in_channels, out_channels)
            self.conv2 = GTConv(in_channels, out_channels)
        else:
            self.conv1 = GTConv(in_channels, out_channels)
    
    def forward(self, A, H_=None):
        if self.first == True:
            a = self.conv1(A)
            b = self.conv2(A)
            #H = torch.bmm(a,b)
            H = tf.matmul(a, b)

            #W = [(F.softmax(self.conv1.weight, dim=1)).detach(),(F.softmax(self.conv2.weight, dim=1)).detach()]
            W = [tf.stop_gradient(tf.nn.softmax(self.conv1.weight, axis=1).numpy()),
                 tf.stop_gradient(tf.nn.softmax(self.conv1.weight, axis=1).numpy()) ]

        else:
            a = self.conv1(A)
            #H = torch.bmm(H_,a)
            H = tf.matmul(H_, a)
            #W = [(F.softmax(self.conv1.weight, dim=1)).detach()]
            W = [tf.stop_gradient(tf.nn.softmax(self.conv1.weight, axis=1).numpy())]
        return H,W
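
A side note on the class above (separate from the error being asked about): keras.layers.Layer subclasses run their computation in call(), not in a PyTorch-style forward(), so calling a layer as self.conv1(A) never reaches a method named forward. A minimal sketch, using an illustrative layer name of my own (CallDemo), of where the computation would live:

import tensorflow as tf
from tensorflow import keras

# Sketch only: Keras dispatches layer(x) to call(), so the body that the
# classes above put in forward() would need to live in call() instead.
class CallDemo(keras.layers.Layer):
    def call(self, x):
        return x + 1.0

demo = CallDemo()
print(demo(tf.constant([1.0, 2.0])))  # call() is invoked -> [2. 3.]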

GTConv layer:

class GTConv(keras.layers.Layer):
    
    def __init__(self, in_channels, out_channels):
        super(GTConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        w_init = tf.random_normal_initializer()
        self.weight = tf.Variable(
            initial_value=w_init(shape=(in_channels, out_channels)),
            trainable=True)
        self.bias = None
        self.scale = tf.Variable([0.1] , trainable=False)
        self.reset_parameters()

        #self.weight = nn.Parameter(torch.Tensor(out_channels,in_channels,1,1))
        #self.bias = None
        #self.scale = nn.Parameter(torch.Tensor([0.1]), requires_grad=False)
        
    def reset_parameters(self):
        n = self.in_channels
        tf.fill(self.weight, 9)
        #if self.bias is not None:
        #    fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
        #   bound = 1 / math.sqrt(fan_in)
        #    nn.init.uniform_(self.bias, -bound, bound)
    
    def forward(self, A):
        A = tf.add_n(tf.nn.softmax(self.weight))
        return A 
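
Another side note: tf.fill(dims, value) takes a shape as its first argument, so tf.fill(self.weight, 9) in reset_parameters passes the weight variable where a shape is expected and discards the result. A minimal sketch, assuming the intent is a constant fill of the existing variable (the value is illustrative only):

import tensorflow as tf

# Sketch only: update the variable in place with assign(); tf.fill builds the
# constant tensor from the variable's own shape.
weight = tf.Variable(tf.random.normal((4, 2)))
weight.assign(tf.fill(weight.shape, 9.0))
print(weight.numpy())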

Input:

inp = tf.constant([4])
out = tf.constant([2])

Error:

Output:

d = GTLayer(inp, out)

---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
in ()
----> 1 d = GTLayer(inp, out)

5 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in raise_from_not_ok_status(e, name)
   7184 def raise_from_not_ok_status(e, name):
   7185   e.message += (" name: " + name if name is not None else "")
-> 7186   raise core._status_to_exception(e) from None  # pylint: disable=protected-access
   7187
   7188

InvalidArgumentError: shape must be a vector of {int32,int64}, got
shape [2,1] [Op:RandomStandardNormal]
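
For reference, a likely reading of the traceback: inp = tf.constant([4]) and out = tf.constant([2]) are rank-1 tensors of shape [1], so inside GTConv.__init__ the argument shape=(in_channels, out_channels) becomes a pair of [1] tensors. The random-normal initializer stacks that pair into a [2,1] tensor instead of the flat integer vector that the RandomStandardNormal op expects, which is why constructing GTLayer(inp, out) already fails. A minimal sketch showing the same initializer succeeding with plain Python ints, or with scalars unwrapped from the constants:

import tensorflow as tf

w_init = tf.random_normal_initializer()

# Fails as in the traceback: each channel count is a shape-[1] tensor, so the
# requested shape becomes a [2,1] tensor rather than a flat vector of ints.
# w_init(shape=(tf.constant([4]), tf.constant([2])))   # InvalidArgumentError

# Works: pass the channel counts as plain Python ints ...
w = w_init(shape=(4, 2))

# ... or unwrap the tensors to scalars before building the layer.
inp = tf.constant([4])
out = tf.constant([2])
w = w_init(shape=(int(inp.numpy()[0]), int(out.numpy()[0])))
print(w.shape)  # (4, 2)

With plain integer channel counts this particular error goes away; note, though, that reset_parameters would still need the tf.fill fix sketched above before the layer constructs cleanly.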
