TensorBoard: "No scalar data was found."
This is my first time using TensorBoard. I've followed many tutorials, but the result is always the same as the attached image below. This is the code where I use TensorBoard:
self.writer = SummaryWriter('SimCLR_Code/SimCLR/runs')
...
def train(self, train_loader):
    scaler = GradScaler(enabled=self.args.fp16_precision)
    sess = tf.Session()
    self.writer = tf.summary.FileWriter('./runs/graphs', sess.graph)
    #saver = tf.train.Saver(tf.global_variables())
    #ckpt = tf.train.get
    # save config file
    save_config_file(self.writer.log_dir, self.args)
    n_iter = 0  # global optimization step
    logging.info(f"Start SimCLR training for {self.args.epochs} epochs.")
    logging.info(f"Training with gpu: {self.args.disable_cuda}.")
    for epoch_counter in range(self.args.epochs):
        for images, _ in tqdm(train_loader):
            images = torch.cat(images, dim=0)
            images = images.to(self.args.device)
            with autocast(enabled=self.args.fp16_precision):
                features = self.model(images)
                logits, labels = self.info_nce_loss(features)
                loss = self.criterion(logits, labels)
            self.optimizer.zero_grad()
            scaler.scale(loss).backward()
            scaler.step(self.optimizer)
            scaler.update()
            if n_iter % self.args.log_every_n_steps == 0:
                top1, top5 = accuracy(logits, labels, topk=(1, 5))
                self.writer.add_scalar('loss', loss, global_step=n_iter)
                self.writer.add_scalar('acc/top1', top1[0], global_step=n_iter)
                self.writer.add_scalar('acc/top5', top5[0], global_step=n_iter)
                self.writer.add_scalar('learning_rate', self.scheduler.get_lr()[0], global_step=n_iter)
            n_iter += 1
        # warmup for the first 10 epochs
        if epoch_counter >= 10:
            try:
                self.scheduler.step()
            except ZeroDivisionError:
                print("ZeroDivision")
        logging.debug(f"Epoch: {epoch_counter}\tLoss: {loss}\tTop1 accuracy: {top1[0]}")  # print training progress
    logging.info("Training has finished.")
    # save model checkpoints
    checkpoint_name = 'checkpoint_{:04d}.pth.tar'.format(self.args.epochs)
    save_checkpoint({
        'epoch': self.args.epochs,
        'arch': self.args.arch,
        'state_dict': self.model.state_dict(),
        'optimizer': self.optimizer.state_dict(),
    }, is_best=False, filename=os.path.join(self.writer.log_dir, checkpoint_name))
    logging.info(f"Model checkpoint and metadata has been saved at {self.writer.log_dir}.")
    # merged = tf.summary.merge_all()
    # writer = tf.summary.FileWriter
    self.writer.flush()
    self.writer.close()
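For comparison, this is the minimal scalar-logging pattern from the PyTorch tutorials I followed (just a sketch; the log directory and tag name here are placeholders, not from my project):

import torch
from torch.utils.tensorboard import SummaryWriter

# One writer per run directory; TensorBoard reads the event files written there.
writer = SummaryWriter('runs/example')
for step in range(100):
    loss = torch.rand(1).item()  # placeholder metric
    writer.add_scalar('loss', loss, global_step=step)
writer.flush()  # push pending events to disk
writer.close()

TensorBoard is then pointed at that directory with tensorboard --logdir runs.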
And here is my file structure:
I've been stuck on this for several days with no progress. Any advice would be much appreciated.