How do I calculate the accuracy of a graph convolutional network?

How can I calculate the accuracy on a GCN based on an event log? With my code I get the following result:

    Epoch: 0, Loss: 1.299316294690154, Accuracy: [tensor([5.]), tensor([9.]), tensor([7.]) ... tensor([6.])], Validation loss: 0.9456889930794145

I would like to have the accuracy output as a single value. Here is the relevant part of my code:
    lr_run = 0
    # Sweep over three learning rates
    for lr_run in range(3):
        if lr_run == 0:
            lr_value = 1e-03
        elif lr_run == 1:
            lr_value = 1e-04
        elif lr_run == 2:
            lr_value = 1e-05
        run = 0
        for run in range(num_runs):
            print("Run: {}, Learning Rate: {}".format(run+1, lr_value))
            model = EventPredictor(num_nodes, num_features)
            train_dl, valid_dl, test_dl = generate_input_and_labels(path)
            adj = generate_process_graph(path)
            criterion = nn.CrossEntropyLoss()
            optimizer = torch.optim.Adam(model.parameters(), lr=lr_value)
            print("************* Event Predictor ***************")
            print("Train size: {}, Validation size: {}, Test size: {}".format(len(train_dl.dataset), len(valid_dl.dataset), len(test_dl.dataset)))
            print(model)
            model = model.to(device)
            adj = adj.to(device)
            epochs_plt = []
            acc_plt = []
            loss_plt = []
            valid_loss_plt = []
            for epoch in range(num_epochs):
                # Training pass
                model.train()
                num_train = 0
                training_loss = 0
                predictions, actuals = list(), list()
                for i, (inputs, targets) in enumerate(train_dl):
                    inputs, targets = inputs.to(device), targets.to(device)
                    optimizer.zero_grad()
                    yhat = model(inputs[0], adj)
                    loss = criterion(yhat.reshape((1, -1)), targets[0].to(torch.long))
                    loss.backward()
                    optimizer.step()
                    training_loss += loss.item()
                    yhat = yhat.to('cpu')
                    yhat = torch.argmax(yhat)
                    actual = targets.to('cpu')
                    actual = actual[0]
                    predictions.append(yhat)
                    actuals.append(actual)
                    num_train += 1
                # Validation pass
                with torch.no_grad():
                    model.eval()
                    num_valid = 0
                    validation_loss = 0
                    for i, (inputs, targets) in enumerate(valid_dl):
                        inputs, targets = inputs.to(device), targets.to(device)
                        yhat_valid = model(inputs[0], adj)
                        loss_valid = criterion(yhat_valid.reshape((1, -1)), targets[0].to(torch.long))
                        validation_loss += loss_valid.item()
                        num_valid += 1
                acc = accuracy_score = (actuals, predictions)
                avg_training_loss = training_loss / num_train
                avg_validation_loss = validation_loss / num_valid
                print("Epoch: {}, Loss: {}, Accuracy: {}, Validation loss: {}".format(epoch, avg_training_loss, acc, avg_validation_loss))
                epochs_plt.append(epoch + 1)
                acc_plt.append(acc)
                loss_plt.append(avg_training_loss)
                valid_loss_plt.append(avg_validation_loss)
Comments (1)
It seems that you are not actually calling the accuracy function. The line

    acc = accuracy_score = (actuals, predictions)

should be

    acc = accuracy_score(actuals, predictions)
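As a follow-up, here is a minimal, self-contained sketch of how the per-epoch accuracy can be reduced to a single float, assuming accuracy_score comes from scikit-learn (sklearn.metrics) and that predictions/actuals are lists of one-element tensors built as in the question's loop; the label values below are made up purely for illustration:

    import torch
    from sklearn.metrics import accuracy_score

    # Hypothetical stand-ins for what the training loop collects per epoch:
    # predictions holds torch.argmax outputs (0-d tensors),
    # actuals holds targets[0] (1-element float tensors).
    predictions = [torch.tensor(5), torch.tensor(9), torch.tensor(7)]
    actuals = [torch.tensor([5.]), torch.tensor([9.]), torch.tensor([6.])]

    # Convert the tensors to plain Python ints so accuracy_score
    # compares two flat lists of class labels.
    pred_labels = [int(p.item()) for p in predictions]
    true_labels = [int(a.item()) for a in actuals]

    acc = accuracy_score(true_labels, pred_labels)  # a single float in [0, 1]
    print("Accuracy: {:.4f}".format(acc))           # prints: Accuracy: 0.6667

With the correction above (plus the .item() conversion, which is an extra assumption on top of it), acc becomes a plain number, so the epoch print and acc_plt then contain values that can be plotted directly.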