conv2d parameters (conv2d, conv3d)




import argparse
import os
import copy

import torch
from torch import nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm

from models import FSRCNN
from datasets import TrainDataset, EvalDataset
from utils import AverageMeter, calc_psnr


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # training data file
    parser.add_argument('--train-file', type=str, help="path to the training data (h5 file)", default="https://www.ctyun.cn/zhishi/Train/91-image_x4.h5")
    # evaluation (test set) file
    parser.add_argument('--eval-file', type=str, help="path to the evaluation data (h5 file)", default="https://www.ctyun.cn/zhishi/Test/Set5_x4.h5")
    # output directory
    parser.add_argument('--outputs-dir', help="directory for saved weights", type=str, default="https://www.ctyun.cn/zhishi/outputs")
    parser.add_argument('--weights-file', type=str)
    # upscaling factor; it should match the data files (the default x4 files expect --scale 4)
    parser.add_argument('--scale', type=int, default=2)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--batch-size', type=int, default=16)
    parser.add_argument('--num-epochs', type=int, default=20)
    parser.add_argument('--num-workers', type=int, default=8)
    parser.add_argument('--seed', type=int, default=123)
    args = parser.parse_args()

    args.outputs_dir = os.path.join(args.outputs_dir, 'x{}'.format(args.scale))

    if not os.path.exists(args.outputs_dir):
        os.makedirs(args.outputs_dir)

    cudnn.benchmark = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    torch.manual_seed(args.seed)

    # FSRCNN is imported from models.py (not shown here); a sketch of its Conv2d layers is given after the script
    model = FSRCNN(scale_factor=args.scale).to(device)
    criterion = nn.MSELoss()
    # per-parameter-group learning rates: the final deconvolution layer uses a 10x smaller lr
    optimizer = optim.Adam([
        {'params': model.first_part.parameters()},
        {'params': model.mid_part.parameters()},
        {'params': model.last_part.parameters(), 'lr': args.lr * 0.1}
    ], lr=args.lr)

    train_dataset = TrainDataset(args.train_file)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True)
    eval_dataset = EvalDataset(args.eval_file)
    eval_dataloader = DataLoader(dataset=eval_dataset, batch_size=1)

    best_weights = copy.deepcopy(model.state_dict())
    best_epoch = 0
    best_psnr = 0.0

    for epoch in range(args.num_epochs):
        model.train()
        epoch_losses = AverageMeter()

        with tqdm(total=(len(train_dataset) - len(train_dataset) % args.batch_size), ncols=80) as t:
            t.set_description('epoch: {}/{}'.format(epoch, args.num_epochs - 1))

            for data in train_dataloader:
                inputs, labels = data

                inputs = inputs.to(device)
                labels = labels.to(device)

                preds = model(inputs)

                loss = criterion(preds, labels)

                epoch_losses.update(loss.item(), len(inputs))

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                t.set_postfix(loss='{:.6f}'.format(epoch_losses.avg))
                t.update(len(inputs))

        torch.save(model.state_dict(), os.path.join(args.outputs_dir, 'epoch_{}.pth'.format(epoch)))

        model.eval()
        epoch_psnr = AverageMeter()

        for data in eval_dataloader:
            inputs, labels = data

            inputs = inputs.to(device)
            labels = labels.to(device)

            with torch.no_grad():
                preds = model(inputs).clamp(0.0, 1.0)

            # calc_psnr comes from utils.py; a sketch is given after the script
            epoch_psnr.update(calc_psnr(preds, labels), len(inputs))

        print('eval psnr: {:.2f}'.format(epoch_psnr.avg))

        if epoch_psnr.avg > best_psnr:
            best_epoch = epoch
            best_psnr = epoch_psnr.avg
            best_weights = copy.deepcopy(model.state_dict())

    print('best epoch: {}, psnr: {:.2f}'.format(best_epoch, best_psnr))
    torch.save(best_weights, os.path.join(args.outputs_dir, 'best.pth'))
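Assuming the script above is saved as train.py (the filename is not given in the post) and the h5 files have been downloaded locally, a typical invocation that matches the default x4 data files would look like:

python train.py --train-file 91-image_x4.h5 --eval-file Set5_x4.h5 --outputs-dir ./outputs --scale 4 --num-epochs 20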
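The FSRCNN class imported from models is not listed in this post. As context for the conv2d parameters in the title, here is a minimal sketch of the layout commonly used in FSRCNN implementations; the layer widths d, s and mapping depth m are assumptions, so check your own models.py. Each nn.Conv2d call takes in_channels, out_channels, kernel_size and padding, and the final upsampling step is a ConvTranspose2d whose stride equals the scale factor.

from torch import nn

class FSRCNN(nn.Module):
    # a sketch of the assumed layout, not the original author's models.py
    def __init__(self, scale_factor, num_channels=1, d=56, s=12, m=4):
        super().__init__()
        # feature extraction: Conv2d(in_channels, out_channels, kernel_size, padding);
        # a 5x5 kernel with padding=2 keeps the spatial size of the low-resolution input
        self.first_part = nn.Sequential(
            nn.Conv2d(num_channels, d, kernel_size=5, padding=5 // 2),
            nn.PReLU(d)
        )
        # shrinking (1x1), m mapping layers (3x3), expanding (1x1)
        layers = [nn.Conv2d(d, s, kernel_size=1), nn.PReLU(s)]
        for _ in range(m):
            layers += [nn.Conv2d(s, s, kernel_size=3, padding=3 // 2), nn.PReLU(s)]
        layers += [nn.Conv2d(s, d, kernel_size=1), nn.PReLU(d)]
        self.mid_part = nn.Sequential(*layers)
        # upsampling: a transposed convolution whose stride equals the scale factor
        self.last_part = nn.ConvTranspose2d(d, num_channels, kernel_size=9,
                                            stride=scale_factor, padding=9 // 2,
                                            output_padding=scale_factor - 1)

    def forward(self, x):
        x = self.first_part(x)
        x = self.mid_part(x)
        return self.last_part(x)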
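The AverageMeter and calc_psnr helpers imported from utils are also not listed. A common minimal implementation, assuming the images have already been scaled to [0, 1], would look roughly like this:

import torch

class AverageMeter(object):
    # keeps a running average of a scalar such as the loss or the PSNR
    def __init__(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def calc_psnr(img1, img2):
    # PSNR for images in [0, 1]: 10 * log10(MAX^2 / MSE) with MAX = 1
    return 10. * torch.log10(1. / torch.mean((img1 - img2) ** 2))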