ValueError: Target size (torch.Size([802816])) must be the same as input size (torch.Size([6422528]))
Referring to my question @ RuntimeError: Given groups=1, weight of size [64, 64, 1, 1], expected input[4, 1, 1080, 1920] to have 64 channels, but got 1 channels instead, I made the suggested corrections and resized the images using the following lines of code in train_unet_GAPs.py (I'm not sure whether this is the correct way of resizing, as this is my first experience with PyTorch):
train_tfms = transforms.Compose([transforms.Resize((448,448)), transforms.ToTensor(),
transforms.Normalize(channel_means, channel_stds)])
val_tfms = transforms.Compose([transforms.Resize((448,448)), transforms.ToTensor(),
transforms.Normalize(channel_means, channel_stds)])
mask_tfms = transforms.Compose([transforms.Resize((448,448)), transforms.ToTensor(),transforms.Normalize((0.5,), (0.5,))])
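As a sanity check on the resize (a minimal sketch, not part of train_unet_GAPs.py; the dummy image size is arbitrary), the pipeline does seem to produce single-channel 448x448 tensors:

import torchvision.transforms as transforms
from PIL import Image

# Apply the same pipeline to a blank grayscale image and inspect the result.
tfms = transforms.Compose([transforms.Resize((448, 448)), transforms.ToTensor(),
                           transforms.Normalize((0.5,), (0.5,))])
dummy = Image.new('L', (1920, 1080))  # width x height, grayscale
print(tfms(dummy).shape)              # torch.Size([1, 448, 448])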
However, now I'm getting the following error:
total images = 2410
create resnet101 model
Started training model from epoch 0
Epoch 0: 0% 0/2048 [00:00<?, ?it/s]Traceback (most recent call last):
File "/content/drive/Othercomputers/My Laptop/crack_segmentation_khanhha/crack_segmentation-master/train_unet_GAPs.py", line 261, in <module>
train(train_loader, model, criterion, optimizer, validate, args)
File "/content/drive/Othercomputers/My Laptop/crack_segmentation_khanhha/crack_segmentation-master/train_unet_GAPs.py", line 123, in train
loss = criterion(masks_probs_flat, true_masks_flat)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/modules/loss.py", line 716, in forward
reduction=self.reduction)
File "/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py", line 3130, in binary_cross_entropy_with_logits
raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
ValueError: Target size (torch.Size([802816])) must be the same as input size (torch.Size([6422528]))
Epoch 0: 0% 0/2048 [00:01<?, ?it/s]
The factor between the two sizes is 8, which I guess is num_classes. However, I don't know where in the code I can correct this.
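To make that factor concrete (my own arithmetic, assuming batch_size=4, 448x448 inputs, and num_classes=8 as set in create_model below):

target_elems = 4 * 1 * 448 * 448     # flattened masks:  802816
output_elems = 4 * 8 * 448 * 448     # flattened logits: 6422528
print(output_elems // target_elems)  # 8, i.e. num_classes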
The folder containing all files is at https://drive.google.com/drive/folders/14NQdtMXokIixBJ5XizexVECn23Jh9aTM?usp=sharing
EDIT:
Here is the code for model definition:
# Excerpt from unet/unet_transfer.py (DecoderBlockV2 and ConvRelu are defined elsewhere in that file):
import torch
import torchvision
from torch import nn
import torch.nn.functional as F

class UNetResNet(nn.Module):
def __init__(self, encoder_depth, num_classes, num_filters=32, dropout_2d=0.2,
pretrained=False, is_deconv=False):
super().__init__()
self.num_classes = num_classes
self.dropout_2d = dropout_2d
if encoder_depth == 34:
self.encoder = torchvision.models.resnet34(pretrained=pretrained)
bottom_channel_nr = 512
elif encoder_depth == 101:
self.encoder = torchvision.models.resnet101(pretrained=pretrained)
bottom_channel_nr = 2048
elif encoder_depth == 152:
self.encoder = torchvision.models.resnet152(pretrained=pretrained)
bottom_channel_nr = 2048
else:
raise NotImplementedError('only 34, 101, 152 version of Resnet are implemented')
self.pool = nn.MaxPool2d(2, 2)
self.relu = nn.ReLU(inplace=True)
#self.conv1 = nn.Sequential(self.encoder.conv1,
# self.encoder.bn1,
# self.encoder.relu,
# self.pool)
self.conv1 = nn.Sequential(nn.Conv2d(1,64,kernel_size=(7,7),stride=(2,2),padding=(3,3),bias=False), # 1 Here is for grayscale images, replace by 3 if you need RGB/BGR
nn.BatchNorm2d(64),
nn.ReLU(),
self.pool
)
self.conv2 = self.encoder.layer1
self.conv3 = self.encoder.layer2
self.conv4 = self.encoder.layer3
self.conv5 = self.encoder.layer4
self.center = DecoderBlockV2(bottom_channel_nr, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec5 = DecoderBlockV2(bottom_channel_nr + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)
self.dec4 = DecoderBlockV2(bottom_channel_nr // 2 + num_filters * 8, num_filters * 8 * 2, num_filters * 8,
is_deconv)
self.dec3 = DecoderBlockV2(bottom_channel_nr // 4 + num_filters * 8, num_filters * 4 * 2, num_filters * 2,
is_deconv)
self.dec2 = DecoderBlockV2(bottom_channel_nr // 8 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2,
is_deconv)
self.dec1 = DecoderBlockV2(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)
self.dec0 = ConvRelu(num_filters, num_filters)
self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
#self.final = nn.Conv2d(num_filters, 1, kernel_size=1)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
pool = self.pool(conv5)
center = self.center(pool)
dec5 = self.dec5(torch.cat([center, conv5], 1))
dec4 = self.dec4(torch.cat([dec5, conv4], 1))
dec3 = self.dec3(torch.cat([dec4, conv3], 1))
dec2 = self.dec2(torch.cat([dec3, conv2], 1))
dec1 = self.dec1(dec2)
dec0 = self.dec0(dec1)
return self.final(F.dropout2d(dec0, p=self.dropout_2d))
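Tracing the shapes by hand (my own back-of-the-envelope): the decoder upsamples back to the input resolution, so self.final, a 1x1 convolution with num_classes output channels, should turn a [4, 1, 448, 448] batch into [4, 8, 448, 448] logits. Flattened, that is the 6,422,528 elements in the error, while the [4, 1, 448, 448] masks flatten to 802,816. A quick check (a sketch, assuming DecoderBlockV2 and ConvRelu are importable via unet.unet_transfer):

from unet.unet_transfer import UNetResNet
import torch

model = UNetResNet(encoder_depth=101, num_classes=8)
x = torch.randn(4, 1, 448, 448)  # one training batch of grayscale images
print(model(x).shape)            # expected: torch.Size([4, 8, 448, 448])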
EDIT2: Here is the code in train_unet_GAPs.py:
import torch
from torch import nn
from unet.unet_transfer import UNet16, UNetResNet
from pathlib import Path
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset, random_split
import torch.nn.functional as F
from torch.autograd import Variable
import shutil
from data_loader import ImgDataSet
import os
import argparse
import tqdm
import numpy as np
import scipy.ndimage as ndimage
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def create_model(device, type ='vgg16'):
if type == 'vgg16':
print('create vgg16 model')
model = UNet16(pretrained=True)
elif type == 'resnet101':
encoder_depth = 101
num_classes = 8
print('create resnet101 model')
model = UNetResNet(encoder_depth=encoder_depth, num_classes=num_classes, pretrained=True)
elif type == 'resnet34':
encoder_depth = 34
num_classes = 8
print('create resnet34 model')
model = UNetResNet(encoder_depth=encoder_depth, num_classes=num_classes, pretrained=True)
else:
assert False
model.eval()
return model.to(device)
def adjust_learning_rate(optimizer, epoch, lr):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def find_latest_model_path(dir):
model_paths = []
epochs = []
for path in Path(dir).glob('*.pt'):
if 'epoch' not in path.stem:
continue
model_paths.append(path)
parts = path.stem.split('_')
epoch = int(parts[-1])
epochs.append(epoch)
if len(epochs) > 0:
epochs = np.array(epochs)
max_idx = np.argmax(epochs)
return model_paths[max_idx]
else:
return None
def train(train_loader, model, criterion, optimizer, validation, args):
latest_model_path = find_latest_model_path(args.model_dir)
best_model_path = os.path.join(*[args.model_dir, 'model_best.pt'])
if latest_model_path is not None:
state = torch.load(latest_model_path)
epoch = state['epoch']
model.load_state_dict(state['model'])
epoch = epoch
#if latest model path does exist, best_model_path should exists as well
assert Path(best_model_path).exists() == True, f'best model path {best_model_path} does not exist'
#load the min loss so far
best_state = torch.load(latest_model_path)
min_val_los = best_state['valid_loss']
print(f'Restored model at epoch {epoch}. Min validation loss so far is : {min_val_los}')
epoch += 1
print(f'Started training model from epoch {epoch}')
else:
print('Started training model from epoch 0')
epoch = 0
min_val_los = 9999
valid_losses = []
for epoch in range(epoch, args.n_epoch + 1):
adjust_learning_rate(optimizer, epoch, args.lr)
tq = tqdm.tqdm(total=(len(train_loader) * args.batch_size))
tq.set_description(f'Epoch {epoch}')
losses = AverageMeter()
model.train()
for i, (input, target) in enumerate(train_loader):
input_var = Variable(input).cuda()
target_var = Variable(target).cuda()
masks_pred = model(input_var)
masks_probs_flat = masks_pred.view(-1)
true_masks_flat = target_var.view(-1)
print(masks_probs_flat.shape, true_masks_flat.shape)
loss = criterion(masks_probs_flat, true_masks_flat)
losses.update(loss.item())  # .item() avoids keeping the autograd graph alive
tq.set_postfix(loss='{:.5f}'.format(losses.avg))
tq.update(args.batch_size)
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
valid_metrics = validation(model, valid_loader, criterion)
valid_loss = valid_metrics['valid_loss']
valid_losses.append(valid_loss)
print(f'\tvalid_loss = {valid_loss:.5f}')
tq.close()
#save the model of the current epoch
epoch_model_path = os.path.join(*[args.model_dir, f'model_epoch_{epoch}.pt'])
torch.save({
'model': model.state_dict(),
'epoch': epoch,
'valid_loss': valid_loss,
'train_loss': losses.avg
}, epoch_model_path)
if valid_loss < min_val_los:
min_val_los = valid_loss
torch.save({
'model': model.state_dict(),
'epoch': epoch,
'valid_loss': valid_loss,
'train_loss': losses.avg
}, best_model_path)
def validate(model, val_loader, criterion):
losses = AverageMeter()
model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
input_var = Variable(input).cuda()
target_var = Variable(target).cuda()
output = model(input_var)
loss = criterion(output, target_var)
losses.update(loss.item(), input_var.size(0))
return {'valid_loss': losses.avg}
def save_check_point(state, is_best, file_name = 'checkpoint.pth.tar'):
torch.save(state, file_name)
if is_best:
shutil.copy(file_name, 'model_best.pth.tar')
def calc_crack_pixel_weight(mask_dir):
avg_w = 0.0
n_files = 0
for path in Path(mask_dir).glob('*.*'):
n_files += 1
m = ndimage.imread(path)
ncrack = np.sum((m > 0)[:])
w = float(ncrack)/(m.shape[0]*m.shape[1])
avg_w = avg_w + (1-w)
avg_w /= float(n_files)
return avg_w / (1.0 - avg_w)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('-n_epoch', default=10, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('-lr', default=0.001, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('-momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('-print_freq', default=20, type=int, metavar='N', help='print frequency (default: 20)')
parser.add_argument('-weight_decay', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('-batch_size', default=4, type=int, help='batch size (default: 4)')
#parser.add_argument('-batch_size', default=2, type=int, help='weight decay (default: 1e-4)')
#parser.add_argument('-num_workers', default=4, type=int, help='output dataset directory')
parser.add_argument('-num_workers', default=2, type=int, help='number of data loading workers')
parser.add_argument('-data_dir',type=str, help='input dataset directory')
parser.add_argument('-model_dir', type=str, help='output model directory')
parser.add_argument('-model_type', type=str, required=False, default='resnet101', choices=['vgg16', 'resnet101', 'resnet34'])
args = parser.parse_args()
os.makedirs(args.model_dir, exist_ok=True)
DIR_IMG = os.path.join(args.data_dir, 'images')
DIR_MASK = os.path.join(args.data_dir, 'masks')
img_names = [path.name for path in Path(DIR_IMG).glob('*.jpg')]
mask_names = [path.name for path in Path(DIR_MASK).glob('*.png')]
print(f'total images = {len(img_names)}')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = create_model(device, args.model_type)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
#crack_weight = 0.4*calc_crack_pixel_weight(DIR_MASK)
#print(f'positive weight: {crack_weight}')
#criterion = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([crack_weight]).to('cuda'))
criterion = nn.BCEWithLogitsLoss().to('cuda')
#channel_means = [0.485, 0.456, 0.406]
#channel_stds = [0.229, 0.224, 0.225]
channel_means = [0.5]
channel_stds = [0.5]
train_tfms = transforms.Compose([transforms.Resize((448,448)), transforms.ToTensor(),
transforms.Normalize(channel_means, channel_stds)])
val_tfms = transforms.Compose([transforms.Resize((448,448)), transforms.ToTensor(),
transforms.Normalize(channel_means, channel_stds)])
mask_tfms = transforms.Compose([transforms.Resize((448,448)), transforms.ToTensor(),transforms.Normalize((0.5,), (0.5,))])
'''
mask_tfms = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.repeat(3,1,1)),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
'''
dataset = ImgDataSet(img_dir=DIR_IMG, img_fnames=img_names, img_transform=train_tfms, mask_dir=DIR_MASK, mask_fnames=mask_names, mask_transform=mask_tfms)
train_size = int(0.85*len(dataset))
valid_size = len(dataset) - train_size
train_dataset, valid_dataset = random_split(dataset, [train_size, valid_size])
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=False, pin_memory=torch.cuda.is_available(), num_workers=args.num_workers)
valid_loader = DataLoader(valid_dataset, args.batch_size, shuffle=False, pin_memory=torch.cuda.is_available(), num_workers=args.num_workers)
#model.cuda()
model.to(torch.device("cuda:0"))
train(train_loader, model, criterion, optimizer, validate, args)