서민정

docs&code: revised the midterm report and uploaded code

The MIT License (MIT)
Copyright (c) 2017- Jiu XU
Copyright (c) 2017- Rakuten, Inc
Copyright (c) 2017- Rakuten Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# PyTorch VDSR
Implementation of the CVPR 2016 paper "Accurate Image Super-Resolution Using
Very Deep Convolutional Networks" (http://cv.snu.ac.kr/research/VDSR/) in PyTorch.
## Usage
### Training
```
usage: main_vdsr.py [-h] [--batchSize BATCHSIZE] [--nEpochs NEPOCHS] [--lr LR]
[--step STEP] [--cuda] [--resume RESUME]
[--start-epoch START_EPOCH] [--clip CLIP] [--threads THREADS]
[--momentum MOMENTUM] [--weight-decay WEIGHT_DECAY]
[--pretrained PRETRAINED] [--gpus GPUS]
optional arguments:
-h, --help Show this help message and exit
--batchSize Training batch size
--nEpochs Number of epochs to train for
--lr Learning rate. Default=0.01
--step Learning rate decay, Default: n=10 epochs
--cuda Use cuda
--resume Path to checkpoint
--clip Clipping Gradients. Default=0.4
--threads Number of threads for the data loader to use. Default=1
--momentum Momentum, Default: 0.9
--weight-decay Weight decay, Default: 1e-4
--pretrained PRETRAINED
path to pretrained model (default: none)
--gpus GPUS gpu ids (default: 0)
```
An example of training usage is shown as follows:
```
python main_vdsr.py --cuda --gpus 0
```
### Evaluation
```
usage: eval.py [-h] [--cuda] [--model MODEL] [--dataset DATASET]
[--scale SCALE] [--gpus GPUS]
PyTorch VDSR Eval
optional arguments:
-h, --help show this help message and exit
--cuda use cuda?
--model MODEL model path
--dataset DATASET dataset name, Default: Set5
--gpus GPUS gpu ids (default: 0)
```
An example of evaluation usage is shown as follows:
```
python eval.py --cuda --dataset Set5
```
### Demo
```
usage: demo.py [-h] [--cuda] [--model MODEL] [--image IMAGE] [--scale SCALE] [--gpus GPUS]
optional arguments:
-h, --help Show this help message and exit
--cuda Use cuda
--model Model path. Default=model/model_epoch_50.pth
--image Image name. Default=butterfly_GT
--scale Scale factor, Default: 4
--gpus GPUS gpu ids (default: 0)
```
An example of demo usage is shown as follows:
```
python demo.py --cuda --model model/model_epoch_50.pth --image butterfly_GT --scale 4
```
### Prepare Training dataset
- We provide a simple HDF5-format training sample in the data folder with 'data' and 'label' keys. The training data is generated with MATLAB bicubic interpolation; please refer to [Code for Data Generation](https://github.com/twtygqyy/pytorch-vdsr/tree/master/data) for creating training files.
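For reference, a minimal sketch of the expected HDF5 layout (the patch count and the 41x41 patch size are illustrative assumptions, not values taken from this repository):
```
# Write a toy train.h5 with the 'data' (bicubic input) and 'label' (ground truth) keys.
import h5py
import numpy as np

data = np.random.rand(100, 1, 41, 41).astype(np.float32)   # bicubic-upscaled luma patches (N, C, H, W)
label = np.random.rand(100, 1, 41, 41).astype(np.float32)  # matching ground-truth patches

with h5py.File("data/train.h5", "w") as hf:
    hf.create_dataset("data", data=data)
    hf.create_dataset("label", data=label)
```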
### Performance
- We provide a pretrained VDSR model trained on [291](https://drive.google.com/open?id=1Rt3asDLuMgLuJvPA1YrhyjWhb97Ly742) images with data augmentation
- No bias is used in this implementation, and the gradient-clipping implementation differs from the paper's
- Performance in PSNR on Set5
| Scale | VDSR Paper | VDSR PyTorch|
| ------------- |:-------------:| -----:|
| 2x | 37.53 | 37.65 |
| 3x | 33.66 | 33.77|
| 4x | 31.35 | 31.45 |
### Result
From left to right: ground truth, bicubic, and VDSR.
<p>
<img src='Set5/butterfly_GT.bmp' height='200' width='200'/>
<img src='result/input.bmp' height='200' width='200'/>
<img src='result/output.bmp' height='200' width='200'/>
</p>
import torch.utils.data as data
import torch
import h5py
class DatasetFromHdf5(data.Dataset):
def __init__(self, file_path):
super(DatasetFromHdf5, self).__init__()
        hf = h5py.File(file_path, 'r')  # open read-only; the datasets are indexed lazily
self.data = hf.get('data')
self.target = hf.get('label')
def __getitem__(self, index):
return torch.from_numpy(self.data[index,:,:,:]).float(), torch.from_numpy(self.target[index,:,:,:]).float()
def __len__(self):
return self.data.shape[0]
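# Example usage (a sketch; assumes data/train.h5 exists with 'data' and 'label' keys):
#   from torch.utils.data import DataLoader
#   train_set = DatasetFromHdf5("data/train.h5")
#   loader = DataLoader(train_set, batch_size=128, shuffle=True)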
import argparse, os
import torch
from torch.autograd import Variable
from PIL import Image
import numpy as np
import time, math
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description="PyTorch VDSR Demo")
parser.add_argument("--cuda", action="store_true", help="use cuda?")
parser.add_argument("--model", default="model/model_epoch_50.pth", type=str, help="model path")
parser.add_argument("--image", default="butterfly_GT", type=str, help="image name")
parser.add_argument("--scale", default=4, type=int, help="scale factor, Default: 4")
parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)")
def PSNR(pred, gt, shave_border=0):
height, width = pred.shape[:2]
pred = pred[shave_border:height - shave_border, shave_border:width - shave_border]
gt = gt[shave_border:height - shave_border, shave_border:width - shave_border]
imdff = pred - gt
rmse = math.sqrt(np.mean(imdff ** 2))
if rmse == 0:
return 100
return 20 * math.log10(255.0 / rmse)
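# colorize: rebuild a displayable RGB image by combining the network's Y (luma)
# channel with the Cb/Cr (chroma) channels taken from the bicubic image.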
def colorize(y, ycbcr):
img = np.zeros((y.shape[0], y.shape[1], 3), np.uint8)
img[:,:,0] = y
img[:,:,1] = ycbcr[:,:,1]
img[:,:,2] = ycbcr[:,:,2]
img = Image.fromarray(img, "YCbCr").convert("RGB")
return img
opt = parser.parse_args()
cuda = opt.cuda
if cuda:
print("=> use gpu id: '{}'".format(opt.gpus))
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
if not torch.cuda.is_available():
raise Exception("No GPU found or Wrong gpu id, please run without --cuda")
model = torch.load(opt.model, map_location=lambda storage, loc: storage)["model"]
im_gt_ycbcr = np.array(Image.open("Set5/" + opt.image + ".bmp").convert("YCbCr"))
im_b_ycbcr = np.array(Image.open("Set5/" + opt.image + "_scale_" + str(opt.scale) + ".bmp").convert("YCbCr"))
im_gt_y = im_gt_ycbcr[:,:,0].astype(float)
im_b_y = im_b_ycbcr[:,:,0].astype(float)
psnr_bicubic = PSNR(im_gt_y, im_b_y,shave_border=opt.scale)
im_input = im_b_y/255.
im_input = Variable(torch.from_numpy(im_input).float()).view(1, -1, im_input.shape[0], im_input.shape[1])
if cuda:
model = model.cuda()
im_input = im_input.cuda()
else:
model = model.cpu()
start_time = time.time()
out = model(im_input)
elapsed_time = time.time() - start_time
out = out.cpu()
im_h_y = out.data[0].numpy().astype(np.float32)
im_h_y = im_h_y * 255.
im_h_y[im_h_y < 0] = 0
im_h_y[im_h_y > 255.] = 255.
psnr_predicted = PSNR(im_gt_y, im_h_y[0,:,:], shave_border=opt.scale)
im_h = colorize(im_h_y[0,:,:], im_b_ycbcr)
im_gt = Image.fromarray(im_gt_ycbcr, "YCbCr").convert("RGB")
im_b = Image.fromarray(im_b_ycbcr, "YCbCr").convert("RGB")
print("Scale=",opt.scale)
print("PSNR_predicted=", psnr_predicted)
print("PSNR_bicubic=", psnr_bicubic)
print("It takes {}s for processing".format(elapsed_time))
fig = plt.figure()
ax = plt.subplot(131)
ax.imshow(im_gt)
ax.set_title("GT")
ax = plt.subplot(132)
ax.imshow(im_b)
ax.set_title("Input (bicubic)")
ax = plt.subplot(133)
ax.imshow(im_h)
ax.set_title("Output (VDSR)")
plt.show()
import argparse, os
import torch
from torch.autograd import Variable
import numpy as np
import time, math, glob
import scipy.io as sio
parser = argparse.ArgumentParser(description="PyTorch VDSR Eval")
parser.add_argument("--cuda", action="store_true", help="use cuda?")
parser.add_argument("--model", default="model/model_epoch_50.pth", type=str, help="model path")
parser.add_argument("--dataset", default="Set5", type=str, help="dataset name, Default: Set5")
parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)")
def PSNR(pred, gt, shave_border=0):
height, width = pred.shape[:2]
pred = pred[shave_border:height - shave_border, shave_border:width - shave_border]
gt = gt[shave_border:height - shave_border, shave_border:width - shave_border]
imdff = pred - gt
rmse = math.sqrt(np.mean(imdff ** 2))
if rmse == 0:
return 100
return 20 * math.log10(255.0 / rmse)
opt = parser.parse_args()
cuda = opt.cuda
if cuda:
print("=> use gpu id: '{}'".format(opt.gpus))
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
if not torch.cuda.is_available():
raise Exception("No GPU found or Wrong gpu id, please run without --cuda")
model = torch.load(opt.model, map_location=lambda storage, loc: storage)["model"]
scales = [2,3,4]
image_list = glob.glob(opt.dataset+"_mat/*.*")
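# Images are matched to a scale purely by filename substring below, so the .mat
# files are expected to contain the scale factor in their names (e.g. "baby_GT_x2.mat").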
for scale in scales:
avg_psnr_predicted = 0.0
avg_psnr_bicubic = 0.0
avg_elapsed_time = 0.0
count = 0.0
for image_name in image_list:
if str(scale) in image_name:
count += 1
print("Processing ", image_name)
im_gt_y = sio.loadmat(image_name)['im_gt_y']
im_b_y = sio.loadmat(image_name)['im_b_y']
im_gt_y = im_gt_y.astype(float)
im_b_y = im_b_y.astype(float)
psnr_bicubic = PSNR(im_gt_y, im_b_y,shave_border=scale)
avg_psnr_bicubic += psnr_bicubic
im_input = im_b_y/255.
im_input = Variable(torch.from_numpy(im_input).float()).view(1, -1, im_input.shape[0], im_input.shape[1])
if cuda:
model = model.cuda()
im_input = im_input.cuda()
else:
model = model.cpu()
start_time = time.time()
HR = model(im_input)
elapsed_time = time.time() - start_time
avg_elapsed_time += elapsed_time
HR = HR.cpu()
im_h_y = HR.data[0].numpy().astype(np.float32)
im_h_y = im_h_y * 255.
im_h_y[im_h_y < 0] = 0
im_h_y[im_h_y > 255.] = 255.
im_h_y = im_h_y[0,:,:]
psnr_predicted = PSNR(im_gt_y, im_h_y,shave_border=scale)
avg_psnr_predicted += psnr_predicted
print("Scale=", scale)
print("Dataset=", opt.dataset)
print("PSNR_predicted=", avg_psnr_predicted/count)
print("PSNR_bicubic=", avg_psnr_bicubic/count)
print("It takes average {}s for processing".format(avg_elapsed_time/count))
# Feature SR
1. train
`!python main.py --dataRoot /content/drive/MyDrive/feature/HR_trainset/features --scaleFactor 4 --featureType p6 --batchSize 16 --cuda --nEpochs 20`
2. inference
`!python inference.py --cuda --model "model.pth" --dataset "/content/drive/MyDrive/feature/features/LR_2" --featureType "p3" --scaleFactor 4`
3. calculate mAP
```
# [1]
# install dependencies:
!pip install pyyaml==5.1
import torch, torchvision
print(torch.__version__, torch.cuda.is_available())
!gcc --version
# opencv is pre-installed on colab
# [2]
# install detectron2: (Colab has CUDA 10.1 + torch 1.8)
# See https://detectron2.readthedocs.io/tutorials/install.html for instructions
import torch
assert torch.__version__.startswith("1.8") # need to manually install torch 1.8 if Colab changes its default version
!pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html
# exit(0) # After installation, you need to "restart runtime" in Colab. This line can also restart runtime
# [3]
# Some basic setup:
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
!python calculate_mAP.py --valid_data_path /content/drive/MyDrive/dataset/validset_100/ --model_name VDSR --loss_type MSE --batch_size 16
```
# import some common libraries
import torch
import numpy as np
import os, json, cv2, random, math
from PIL import Image
from torch.nn.utils.rnn import pad_sequence
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.modeling import build_model, build_backbone
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.utils.visualizer import Visualizer
import detectron2.data.transforms as T
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools.mask import encode
import argparse
parser = argparse.ArgumentParser(description="PyTorch CARN")
parser.add_argument("--data_path", type=str, default = "/home/ubuntu/JH/exp1/dataset")
parser.add_argument("--valid_data_path", type=str)
parser.add_argument("--rescale_factor", type=int, default=4, help="rescale factor for using in training")
parser.add_argument("--model_name", type=str,choices= ["VDSR", "CARN", "SRRN","FRGAN"], default='CARN', help="Feature type for usingin training")
parser.add_argument("--loss_type", type=str, choices= ["MSE", "L1", "SmoothL1","vgg_loss","ssim_loss","adv_loss","lpips"], default='MSE', help="loss type in training")
parser.add_argument('--batch_size', type=int, default=256)
opt = parser.parse_args()
print(opt)
def myRound(x):  # round half away from zero (symmetric for positive and negative values)
abs_x = abs(x)
val = np.int16(abs_x + 0.5)
val2 = np.choose(
x < 0,
[
val, val*(-1)
]
)
return val2
def myClip(x, maxV):
val = np.choose(
x > maxV,
[
x, maxV
]
)
return val
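# A quick sanity check (sketch): unlike np.round's round-half-to-even,
# myRound rounds half away from zero, and myClip caps values at maxV, e.g.
#   myRound(np.array([0.5, -0.5, 1.2]))  -> [ 1, -1,  1]
#   myClip(np.array([250, 300]), 255)    -> [250, 255]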
image_idx = 0
cfg = get_cfg()
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
image_idx = 0
anns = []  # detection results accumulated across all images
# Original_8bit
image_files = ['001000', '002153', '008021', '009769', '009891', '015335', '017627', '018150', '018837', '022589']
image_files.extend(['022935', '023230', '024610', '025560', '025593', '027620', '155341', '161397', '165336', '166287'])
image_files.extend(['166642', '169996', '172330', '172648', '176606', '176701', '179765', '180101', '186296', '250758'])
image_files.extend(['259382', '267191', '287545', '287649', '289741', '293245', '308328', '309452', '335529', '337987'])
image_files.extend(['338625', '344029', '350122', '389933', '393226', '395343', '395633', '401862', '402473', '402992'])
image_files.extend(['404568', '406997', '408112', '410650', '414385', '414795', '415194', '415536', '416104', '416758'])
image_files.extend(['427055', '428562', '430073', '433204', '447200', '447313', '448448', '452321', '453001', '458755'])
image_files.extend(['462904', '463522', '464089', '468965', '469192', '469246', '471450', '474078', '474881', '475678'])
image_files.extend(['475779', '537802', '542625', '543043', '543300', '543528', '547502', '550691', '553669', '567740'])
image_files.extend(['570688', '570834', '571943', '573391', '574315', '575372', '575970', '578093', '579158', '581100'])
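# The 100 validation images are addressed by the numeric suffix of their COCO
# val2017 file names; the full 12-digit id is rebuilt with the '000000' prefix below.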
for iter in range(0, 100):
image_file_number = image_files[image_idx]
aug = T.ResizeShortestEdge(
# [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
# [480, 480], cfg.INPUT.MAX_SIZE_TEST
[768, 768], cfg.INPUT.MAX_SIZE_TEST
)
image_prefix = "COCO_val2017_"
image = cv2.imread(opt.valid_data_path + '000000'+ image_file_number +'.jpg')
# image = cv2.imread('./dataset/validset_100/000000'+ image_file_number +'.jpg')
height, width = image.shape[:2]
image = aug.get_transform(image).apply_image(image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = [{"image": image, "height": height, "width": width}]
with torch.no_grad():
images = model.preprocess_image(inputs) # don't forget to preprocess
features = model.backbone(images.tensor) # set of cnn features
p2_feature_original = features['p2'].to("cpu")
p3_feature_original = features['p3'].to("cpu")
p4_feature_original = features['p4'].to("cpu")
bitDepth = 8
maxRange = [0, 0, 0, 0, 0]
def maxVal(x):
return pow(2, x)
def offsetVal(x):
return pow(2, x-1)
def maxRange_layer(x):
absolute_arr = torch.abs(x) * 2
max_arr = torch.max(absolute_arr)
return torch.ceil(max_arr)
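# Assumption about the packing scheme: the feature PNGs are taken to have been
# quantized with the inverse of the reconstruction used below, i.e.
#   q = round(x / maxRange * maxVal(bitDepth) + offsetVal(bitDepth))
# so that ((q - offsetVal) / maxVal) * maxRange maps the 8-bit pixel values
# back into each level's dynamic range.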
act2 = p2_feature_original.squeeze()
maxRange[0] = maxRange_layer(act2)
act3 = p3_feature_original.squeeze()
maxRange[1] = maxRange_layer(act3)
act4 = p4_feature_original.squeeze()
maxRange[2] = maxRange_layer(act4)
globals()['maxRange_{}'.format(image_file_number)] = maxRange  # stash the per-image dynamic ranges
# p2_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p2.png'
# p2_feature_img = Image.open('./result/{}/inference/{}_p2x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter)))
p2_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p2/' + image_prefix + '000000' + image_file_number + '_p2' +'.png')
# # y_p2, cb, cr = p2_feature_img.split()
p2_feature_arr = np.array(p2_feature_img)
p2_feature_arr_round = myRound(p2_feature_arr)
# p3_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p3.png')
# p3_feature_img = Image.open('./result/{}/inference/{}_p3x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter)))
p3_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p3/' + image_prefix + '000000' + image_file_number + '_p3' +'.png')
# # y_p3, cb2, cr2 = p3_feature_img.split()
p3_feature_arr = np.array(p3_feature_img)
p3_feature_arr_round = myRound(p3_feature_arr)
# p4_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p4.png')
# p4_feature_img = Image.open('./result/{}/inference/{}_p4x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter)))
p4_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p4/' + image_prefix + '000000' + image_file_number + '_p4' +'.png')
# y_p4, cb3, cr3 = p4_feature_img.split()
p4_feature_arr = np.array(p4_feature_img)
p4_feature_arr_round = myRound(p4_feature_arr)
# reconstruction: dequantize the 8-bit feature images back into feature ranges (MSB code)
recon_p2 = (((p2_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[0].numpy())
recon_p3 = (((p3_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[1].numpy())
recon_p4 = (((p4_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[2].numpy())
tensor_value = recon_p2
tensor_value2 = recon_p3
tensor_value3 = recon_p4
# # end of MSB code
# LSB and original code
# reconstruction
# recon_p2 = (((p2_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[0].numpy())
# recon_p3 = (((p3_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[1].numpy())
# recon_p4 = (((p4_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[2].numpy())
# recon_p5 = (((p5_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[3].numpy())
# recon_p6 = (((p6_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[4].numpy())
tensor_value = torch.as_tensor(recon_p2.astype("float32"))
tensor_value2 = torch.as_tensor(recon_p3.astype("float32"))
tensor_value3 = torch.as_tensor(recon_p4.astype("float32"))
# end of LSB / original code
# Split each reconstructed level into its 16x16 grid of per-channel tiles
# (256 tiles in row-major order, matching the packed image layout).
p2 = [tile for row in torch.chunk(tensor_value, 16, dim=0) for tile in torch.chunk(row, 16, dim=1)]
p3 = [tile for row in torch.chunk(tensor_value2, 16, dim=0) for tile in torch.chunk(row, 16, dim=1)]
p4 = [tile for row in torch.chunk(tensor_value3, 16, dim=0) for tile in torch.chunk(row, 16, dim=1)]
p2_tensor = pad_sequence(p2, batch_first=True)
p3_tensor = pad_sequence(p3, batch_first=True)
p4_tensor = pad_sequence(p4, batch_first=True)
cc = p2_tensor.unsqueeze(0)
cc2 = p3_tensor.unsqueeze(0)
cc3 = p4_tensor.unsqueeze(0)
p2_cuda = cc.to(torch.device("cuda"))
p3_cuda = cc2.to(torch.device("cuda"))
p4_cuda = cc3.to(torch.device("cuda"))
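# The reconstructed P2-P4 tensors are substituted into the backbone's feature
# dict below, so the COCO mAP measured at the end reflects these features.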
aug = T.ResizeShortestEdge(
# [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
# [480, 480], cfg.INPUT.MAX_SIZE_TEST
[768, 768], cfg.INPUT.MAX_SIZE_TEST
)
image = cv2.imread(opt.valid_data_path + '000000'+ image_file_number +'.jpg')
height, width = image.shape[:2]
image = aug.get_transform(image).apply_image(image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = [{"image": image, "height": height, "width": width}]
with torch.no_grad():
images = model.preprocess_image(inputs) # don't forget to preprocess
features = model.backbone(images.tensor) # set of cnn features
features['p2'] = p2_cuda
features['p3'] = p3_cuda
features['p4'] = p4_cuda
proposals, _ = model.proposal_generator(images, features, None) # RPN
features_ = [features[f] for f in model.roi_heads.box_in_features]
box_features = model.roi_heads.box_pooler(features_, [x.proposal_boxes for x in proposals])
box_features = model.roi_heads.box_head(box_features) # features of all 1k candidates
predictions = model.roi_heads.box_predictor(box_features)
pred_instances, pred_inds = model.roi_heads.box_predictor.inference(predictions, proposals)
pred_instances = model.roi_heads.forward_with_given_boxes(features, pred_instances)
# output boxes, masks, scores, etc
pred_instances = model._postprocess(pred_instances, inputs, images.image_sizes) # scale box to orig size
# features of the proposed boxes
feats = box_features[pred_inds]
pred_category = pred_instances[0]["instances"].pred_classes.to("cpu")
pred_segmentation = pred_instances[0]["instances"].pred_masks.to("cpu")
pred_score = pred_instances[0]["instances"].scores.to("cpu")
# Map detectron2's contiguous class indices (0-79) back to the original COCO
# category ids (1-90), which skip the ids unused in the 91-category list.
xxx = pred_category.numpy()
xxx = xxx + 1
for idx in range(len(xxx)):
if -1 < int(xxx[idx]) < 12:
xxx[idx] = xxx[idx]
elif 11 < int(xxx[idx]) < 25:
xxx[idx] = xxx[idx] + 1
elif 24 < int(xxx[idx]) < 27:
xxx[idx] = xxx[idx] + 2
elif 26 < int(xxx[idx]) < 41:
xxx[idx] = xxx[idx] + 4
elif 40 < int(xxx[idx]) < 61:
xxx[idx] = xxx[idx] + 5
elif 60 < int(xxx[idx]) < 62:
xxx[idx] = 67
elif 61 < int(xxx[idx]) < 63:
xxx[idx] = 70
elif 62 < int(xxx[idx]) < 74:
xxx[idx] = xxx[idx] + 9
else:
xxx[idx] = xxx[idx] + 10
imgID = int(image_file_number)
# anns is initialized once above and accumulates across images
for idx in range(len(pred_category.numpy())):
anndata = {}
anndata['image_id'] = imgID
anndata['category_id'] = int(xxx[idx])
anndata['segmentation'] = encode(np.asfortranarray(pred_segmentation[idx].numpy()))
anndata['score'] = float(pred_score[idx].numpy())
anns.append(anndata)
image_idx = image_idx + 1
# print("###image###:{}".format(image_idx))
annType = ['segm','bbox','keypoints']
annType = annType[0] #specify type here
prefix = 'instances'
print('Running demo for *%s* results.'%(annType))
# imgIds = [560474]
annFile = './instances_val2017_dataset100.json'
cocoGt=COCO(annFile)
#initialize COCO detections api
resFile = anns
cocoDt=cocoGt.loadRes(resFile)
# running evaluation
cocoEval = COCOeval(cocoGt,cocoDt,annType)
# cocoEval.params.imgIds = imgIds
# (see the commented-out imgIds line at the top)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
import os
from PIL import Image
def crop_feature(datapath, feature_type, scale_factor, print_message=False):
data_path = datapath
datatype = feature_type
rescale_factor = scale_factor
    if not os.path.exists(data_path):
        raise Exception(f"[!] {data_path} does not exist")
hr_imgs = []
w, h = Image.open(datapath).size
width = int(w / 16)
height = int(h / 16)
lwidth = int(width / rescale_factor)
lheight = int(height / rescale_factor)
if print_message:
print("lr: ({} {}), hr: ({} {})".format(lwidth, lheight, width, height))
hr_image = Image.open(datapath) # .convert('RGB')\
for i in range(16):
for j in range(16):
(left, upper, right, lower) = (
i * width, j * height, (i + 1) * width, (j + 1) * height)
crop = hr_image.crop((left, upper, right, lower))
crop = crop.resize((lwidth,lheight), Image.BICUBIC)
crop = crop.resize((width, height), Image.BICUBIC)
hr_imgs.append(crop)
return hr_imgs
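# Example usage (a sketch; the .png path is hypothetical):
#   tiles = crop_feature("features/p2/example_p2.png", "p2", 4)
#   assert len(tiles) == 256  # 16 x 16 grid of bicubic-degraded tiles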
from PIL import Image
import os
from glob import glob
from torchvision import transforms
from torch.utils.data.dataset import Dataset
import torch
import pdb
import math
import numpy as np
class FeatureDataset(Dataset):
    def __init__(self, data_path, datatype, rescale_factor, valid):
        self.data_path = data_path
        self.datatype = datatype
        self.rescale_factor = rescale_factor
        if not os.path.exists(data_path):
            raise Exception(f"[!] {self.data_path} does not exist")
        if valid:
            self.hr_path = os.path.join(self.data_path, 'valid')
            self.hr_path = os.path.join(self.hr_path, self.datatype)
        else:
            self.hr_path = os.path.join(self.data_path, 'LR_2')
            self.hr_path = os.path.join(self.hr_path, self.datatype)
        print(self.hr_path)
        self.hr_path = sorted(glob(os.path.join(self.hr_path, "*.*")))
        self.hr_imgs = []
        w, h = Image.open(self.hr_path[0]).size
        self.width = int(w / 16)
        self.height = int(h / 16)
        self.lwidth = int(self.width / self.rescale_factor)  # shrink by rescale_factor
        self.lheight = int(self.height / self.rescale_factor)
        print("lr: ({} {}), hr: ({} {})".format(self.lwidth, self.lheight, self.width, self.height))
        for hr in self.hr_path:  # split each packed feature image into 256 tiles
            hr_image = Image.open(hr)  # .convert('RGB')
            for i in range(16):
                for j in range(16):
                    (left, upper, right, lower) = (
                        i * self.width, j * self.height, (i + 1) * self.width, (j + 1) * self.height)
                    crop = hr_image.crop((left, upper, right, lower))
                    self.hr_imgs.append(crop)

    def __getitem__(self, idx):
        hr_image = self.hr_imgs[idx]
        transform = transforms.Compose([
            transforms.Resize((self.lheight, self.lwidth), Image.BICUBIC),
            transforms.Resize((self.height, self.width), Image.BICUBIC),
            transforms.ToTensor()
        ])
        # return the bicubic-degraded image and the original, both as tensors
        return transform(hr_image), transforms.ToTensor()(hr_image)

    def __len__(self):
        return len(self.hr_path) * 16 * 16

def get_data_loader_test_version(data_path, feature_type, rescale_factor, batch_size, num_workers):
    full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False)
    print("dataset size: {}".format(len(full_dataset)))
    for f in full_dataset:
        print(type(f))

def get_data_loader(data_path, feature_type, rescale_factor, batch_size, num_workers):
    full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False)
    train_size = int(0.9 * len(full_dataset))
    test_size = len(full_dataset) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
    torch.manual_seed(3334)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True,
                                               num_workers=num_workers, pin_memory=False)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False,
                                              num_workers=num_workers, pin_memory=True)
    return train_loader, test_loader

def get_training_data_loader(data_path, feature_type, rescale_factor, batch_size, num_workers):
    full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False)
    torch.manual_seed(3334)
    train_loader = torch.utils.data.DataLoader(dataset=full_dataset, batch_size=batch_size, shuffle=True,
                                               num_workers=num_workers, pin_memory=False)
    return train_loader

def get_infer_dataloader(data_path, feature_type, rescale_factor, batch_size, num_workers):
    dataset = FeatureDataset(data_path, feature_type, rescale_factor, True)
    data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False,
                                              num_workers=num_workers, pin_memory=False)
return data_loader
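# Example usage (a sketch; the data path is an assumption):
#   train_loader = get_training_data_loader("/data/features", "p2", 4, 64, 1)
#   for lr_batch, hr_batch in train_loader:
#       pass  # lr_batch: bicubic-degraded tiles, hr_batch: original tiles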
import argparse, os
import torch
from torch.autograd import Variable
import numpy as np
import time, math, glob
import scipy.io as sio
from crop_feature import crop_feature
from PIL import Image
import cv2
from matplotlib import pyplot as plt
from math import log10, sqrt
parser = argparse.ArgumentParser(description="PyTorch VDSR Eval")
parser.add_argument("--cuda", action="store_true", help="use cuda?")
parser.add_argument("--model", default="model/model_epoch_50.pth", type=str, help="model path")
parser.add_argument("--dataset", default="Set5", type=str, help="dataset name, Default: Set5")
parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)")
parser.add_argument("--featureType", default="p3", type=str)
parser.add_argument("--scaleFactor", default=4, type=int, help="scale factor")
parser.add_argument("--singleImage", type=str, default="N", help="if it is a single image, enter \"y\"")
def PSNR(pred, gt, shave_border=0):
height, width = pred.shape[:2]
pred = pred[shave_border:height - shave_border, shave_border:width - shave_border]
gt = gt[shave_border:height - shave_border, shave_border:width - shave_border]
imdff = pred - gt
rmse = math.sqrt(np.mean(imdff ** 2))
if rmse == 0:
return 100
return 20 * math.log10(255.0 / rmse)
def concatFeatures(features, image_name, bicubic=False):
    # Reassemble the 256 tiles (16 columns of 16 tiles each) into one packed image.
    features_new = [concat_vertical(features[i * 16:(i + 1) * 16]) for i in range(16)]
    final_concat_feature = concat_horizontal(features_new)
    if bicubic:
        save_dir = os.path.join("features", "LR_2", "LR", opt.featureType)
    else:
        save_dir = os.path.join("features", "LR_2", opt.featureType)
    os.makedirs(save_dir, exist_ok=True)  # create any missing parent directories
    cv2.imwrite(os.path.join(save_dir, image_name), final_concat_feature)
def concat_horizontal(feature):
result = cv2.hconcat([feature[0], feature[1]])
for i in range(2, len(feature)):
result = cv2.hconcat([result, feature[i]])
return result
def concat_vertical(feature):
result = cv2.vconcat([feature[0], feature[1]])
for i in range(2, len(feature)):
result = cv2.vconcat([result, feature[i]])
return result
opt = parser.parse_args()
cuda = opt.cuda
if cuda:
print("=> use gpu id: '{}'".format(opt.gpus))
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
if not torch.cuda.is_available():
raise Exception("No GPU found or Wrong gpu id, please run without --cuda")
model = torch.load(opt.model, map_location=lambda storage, loc: storage)["model"]
scales = [opt.scaleFactor]
# image_list = glob.glob(opt.dataset+"/*.*")
if opt.singleImage == "Y" :
# image_list = crop_feature(opt.dataset, opt.featureType, opt.scaleFactor)
image_list = opt.dataset
else:
image_path = os.path.join(opt.dataset, opt.featureType)
image_list = os.listdir(image_path)
print(image_path)
print(image_list)
for scale in scales:
for image in image_list:
avg_psnr_predicted = 0.0
avg_psnr_bicubic = 0.0
avg_elapsed_time = 0.0
count = 0.0
image_name_cropped = crop_feature(os.path.join(image_path, image), opt.featureType, opt.scaleFactor)
features = []
features_bicubic = []
for image_name in image_name_cropped:
count += 1
f_gt = image_name
w, h = image_name.size
f_bi = image_name.resize((w//scale,h//scale), Image.BICUBIC)
f_bi = f_bi.resize((w,h), Image.BICUBIC)
f_gt = np.array(f_gt)
f_bi = np.array(f_bi)
f_gt = f_gt.astype(float)
f_bi = f_bi.astype(float)
features_bicubic.append(f_bi)
psnr_bicubic = PSNR(f_bi, f_gt, shave_border=scale)
# psnr_bicubic = PSNR_ver2(cv2.imread(f_gt), cv2.imread(f_bi))
avg_psnr_bicubic += psnr_bicubic
f_input = f_bi/255.
f_input = Variable(torch.from_numpy(f_input).float()).view(1, -1, f_input.shape[0], f_input.shape[1])
if cuda:
model = model.cuda()
f_input = f_input.cuda()
else:
model = model.cpu()
start_time = time.time()
SR = model(f_input)
elapsed_time = time.time() - start_time
avg_elapsed_time += elapsed_time
SR = SR.cpu()
f_sr = SR.data[0].numpy().astype(np.float32)
f_sr = f_sr * 255
f_sr[f_sr<0] = 0
f_sr[f_sr>255.] = 255.
f_sr = f_sr[0,:,:]
psnr_predicted = PSNR(f_sr, f_gt, shave_border=scale)
# psnr_predicted = PSNR_ver2(cv2.imread(f_gt), cv2.imread(f_sr))
avg_psnr_predicted += psnr_predicted
features.append(f_sr)
concatFeatures(features, image)
concatFeatures(features_bicubic, image, True)
print("Scale=", scale)
print("Dataset=", opt.dataset)
print("Average PSNR_predicted=", avg_psnr_predicted/count)
print("Average PSNR_bicubic=", avg_psnr_bicubic/count)
# Show graph
# f_gt = Image.fromarray(f_gt)
# f_b = Image.fromarray(f_bi)
# f_sr = Image.fromarray(f_sr)
# fig = plt.figure(figsize=(18, 16), dpi= 80)
# ax = plt.subplot("131")
# ax.imshow(f_gt)
# ax.set_title("GT")
# ax = plt.subplot("132")
# ax.imshow(f_bi)
# ax.set_title("Input(bicubic)")
# ax = plt.subplot("133")
# ax.imshow(f_sr)
# ax.set_title("Output(vdsr)")
# plt.show()
import argparse, os
from datasets import get_data_loader
import torch
import random
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from vdsr import Net
from dataset import DatasetFromHdf5
## Custom
from data import FeatureDataset
from datasets import get_training_data_loader
# from datasets import get_data_loader_test_version
# from feature_dataset import get_training_data_loader
# from make_dataset import make_dataset
import numpy as np
from dataFromH5 import Read_dataset_h5
import matplotlib.pyplot as plt
import math
# Training settings
parser = argparse.ArgumentParser(description="PyTorch VDSR")
parser.add_argument("--batchSize", type=int, default=128, help="Training batch size")
parser.add_argument("--nEpochs", type=int, default=50, help="Number of epochs to train for")
parser.add_argument("--lr", type=float, default=0.1, help="Learning Rate. Default=0.1")
parser.add_argument("--dataRoot", type=str)
parser.add_argument("--featureType", type=str)
parser.add_argument("--scaleFactor", type=int, default=4)
parser.add_argument("--batchSize", type=int, default=64, help="Training batch size")
parser.add_argument("--nEpochs", type=int, default=20, help="Number of epochs to train for")
parser.add_argument("--lr", type=float, default=0.001, help="Learning Rate. Default=0.1")
parser.add_argument("--step", type=int, default=10, help="Sets the learning rate to the initial LR decayed by momentum every n epochs, Default: n=10")
parser.add_argument("--cuda", action="store_true", help="Use cuda?")
parser.add_argument("--resume", default="", type=str, help="Path to checkpoint (default: none)")
parser.add_argument("--start-epoch", default=1, type=int, help="Manual epoch number (useful on restarts)")
parser.add_argument("--clip", type=float, default=0.4, help="Clipping Gradients. Default=0.4")
parser.add_argument("--threads", type=int, default=1, help="Number of threads for data loader to use, Default: 1")
parser.add_argument("--momentum", default=0.9, type=float, help="Momentum, Default: 0.9")
parser.add_argument("--weight-decay", "--wd", default=1e-4, type=float, help="Weight decay, Default: 1e-4")
parser.add_argument('--pretrained', default='', type=str, help='path to pretrained model (default: none)')
parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)")
total_loss_for_plot = list()
total_psnr = list()
def main():
global opt, model
    # ...
cudnn.benchmark = True
print("===> Loading datasets")
# train_set = DatasetFromHdf5("data/train.h5")
# training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
training_data_loader = get_training_data_loader(opt.dataRoot, opt.featureType, opt.scaleFactor, opt.batchSize, opt.threads)
# training_data_loader = make_dataset(opt.dataRoot, opt.featureType, opt.scaleFactor, opt.batchSize, opt.threads)
print("===> Building model")
model = Net()
criterion = nn.MSELoss(reduction='sum')
print("===> Setting GPU")
if cuda:
    # ...
if os.path.isfile(opt.pretrained):
print("=> loading model '{}'".format(opt.pretrained))
weights = torch.load(opt.pretrained)
opt.start_epoch = weights["epoch"] + 1
model.load_state_dict(weights['model'].state_dict())
else:
print("=> no model found at '{}'".format(opt.pretrained))
    # ...
optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay)
print("===> Training")
for epoch in range(opt.start_epoch, opt.nEpochs + 1):
train(training_data_loader, optimizer, model, criterion, epoch)
save_checkpoint(model, epoch, optimizer)
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 10 epochs"""
lr = opt.lr * (0.1 ** (epoch // opt.step))
return lr
def PSNR(loss):
psnr = 10 * np.log10(1 / (loss + 1e-10))
# psnr = 20 * math.log10(255.0 / (math.sqrt(loss)))
return psnr
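# Note: this formula assumes `loss` is a mean squared error over values in [0, 1];
# with a sum-reduced criterion the value should first be divided by the number of
# pixels for the result to be on the usual dB scale.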
def train(training_data_loader, optimizer, model, criterion, epoch):
lr = adjust_learning_rate(optimizer, epoch-1)
    # ...
model.train()
    total_loss = 0  # epoch-level loss accumulator
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target = Variable(batch[0], requires_grad=False), Variable(batch[1], requires_grad=False)
        if opt.cuda:
            input = input.cuda()
            target = target.cuda()
        loss = criterion(model(input), target)
        optimizer.zero_grad()
        total_loss += loss.item()
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), opt.clip)
        optimizer.step()
epoch_loss = total_loss / len(training_data_loader)
total_loss_for_plot.append(epoch_loss)
psnr = PSNR(epoch_loss)
    total_psnr.append(psnr)
print("===> Epoch[{}]: loss : {:.10f} ,PSNR : {:.10f}".format(epoch, epoch_loss, psnr))
# if iteration%100 == 0:
# print("===> Epoch[{}]({}/{}): Loss: {:.10f}".format(epoch, iteration, len(training_data_loader), loss.item()))
def save_checkpoint(model, epoch, optimizer):
model_out_path = "checkpoint/" + "model_epoch_{}_{}.pth".format(epoch, opt.featureType)
state = {"epoch": epoch ,"model": model, "model_state_dict":model.state_dict(), "optimizer_state_dict":optimizer.state_dict(),
"loss": total_loss_for_plot, "psnr":total_pnsr}
if not os.path.exists("checkpoint/"):
os.makedirs("checkpoint/")
    # ...
class Conv_ReLU_Block(nn.Module):
    # ...
    def forward(self, x):
return self.relu(self.conv(x))
class Net(nn.Module):
    def __init__(self):
super(Net, self).__init__()
self.residual_layer = self.make_layer(Conv_ReLU_Block, 18)
self.input = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
self.output = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
for m in self.modules():
        # ...
return nn.Sequential(*layers)
def forward(self, x):
residual = x
out = self.relu(self.input(x))
out = self.residual_layer(out)
        # ...