서민정

feat: upload code, final report, and PPT

@@ -45,9 +45,8 @@ from pycocotools.mask import encode
import argparse
parser = argparse.ArgumentParser(description="PyTorch CARN")
parser.add_argument("--data_path", type=str, default = "/home/ubuntu/JH/exp1/dataset")
parser.add_argument("--valid_data_path", type=str)
parser.add_argument("--rescale_factor", type=int, default=4, help="rescale factor for using in training")
parser.add_argument("--rescale_factor", type=int, help="rescale factor for using in training")
parser.add_argument("--model_name", type=str,choices= ["VDSR", "CARN", "SRRN","FRGAN"], default='CARN', help="Feature type for usingin training")
parser.add_argument("--loss_type", type=str, choices= ["MSE", "L1", "SmoothL1","vgg_loss","ssim_loss","adv_loss","lpips"], default='MSE', help="loss type in training")
parser.add_argument('--batch_size', type=int, default=256)
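For context, a minimal sketch (not part of this diff) of how the options above are consumed; `opt` is the name the rest of this file uses, and the script name and flag values shown are illustrative assumptions.
opt = parser.parse_args()
# e.g. invoked as: python train.py --rescale_factor 4 --model_name CARN --loss_type L1
print("model={}, loss={}, rescale=x{}, batch={}".format(
    opt.model_name, opt.loss_type, opt.rescale_factor, opt.batch_size))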
@@ -153,9 +152,12 @@ for iter in range(0, 100):
globals()['maxRange_{}'.format(image_file_number)] = maxRange
# p2_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p2.png')
p2_feature_img = Image.open('/content/drive/MyDrive/result/inference_x{}/LR_2/p2/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p2' +'.png')
p3_feature_img = Image.open('/content/drive/MyDrive/result/inference_x{}/LR_2/p3/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p3' +'.png')
p4_feature_img = Image.open('/content/drive/MyDrive/result/inference_x{}/LR_2/p4/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p4' +'.png')
# p2_feature_img = Image.open('./result/{}/inference/{}_p2x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter)))
p2_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p2/' + image_prefix + '000000' + image_file_number + '_p2' +'.png')
# y_p2, cb, cr = p2_feature_img.split()
p2_feature_arr = np.array(p2_feature_img)
p2_feature_arr_round = myRound(p2_feature_arr)
@@ -163,14 +165,14 @@ for iter in range(0, 100):
# p3_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p3.png')
# p3_feature_img = Image.open('./result/{}/inference/{}_p3x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter)))
p3_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p3/' + image_prefix + '000000' + image_file_number + '_p3' +'.png')
# y_p3, cb2, cr2 = p3_feature_img.split()
p3_feature_arr = np.array(p3_feature_img)
p3_feature_arr_round = myRound(p3_feature_arr)
# p4_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p4.png')
# p4_feature_img = Image.open('./result/{}/inference/{}_p4x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter)))
p4_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p4/' + image_prefix + '000000' + image_file_number + '_p4' +'.png')
# y_p4, cb3, cr3 = p4_feature_img.split()
p4_feature_arr = np.array(p4_feature_img)
p4_feature_arr_round = myRound(p4_feature_arr)
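`myRound` is used above but defined outside this hunk. A plausible stand-in (an assumption, not the repository's code) rounds half away from zero, which differs from `np.round`'s round-half-to-even behavior:
import numpy as np

def myRound_sketch(x):
    # elementwise round-half-away-from-zero: floor(|x| + 0.5) with the sign restored
    return np.sign(x) * np.floor(np.abs(x) + 0.5)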
@@ -117,9 +117,7 @@ model = torch.load(opt.model, map_location=lambda storage, loc: storage)["model"]
scales = [opt.scaleFactor]
# image_list = glob.glob(opt.dataset+"/*.*")
if opt.singleImage == "Y":
# image_list = crop_feature(opt.dataset, opt.featureType, opt.scaleFactor)
image_list = opt.dataset
else:
image_path = os.path.join(opt.dataset, opt.featureType)
@@ -150,7 +148,6 @@ for scale in scales:
f_bi = f_bi.astype(float)
features_bicubic.append(f_bi)
psnr_bicubic = PSNR(f_bi, f_gt, shave_border=scale)
# psnr_bicubic = PSNR_ver2(cv2.imread(f_gt), cv2.imread(f_bi))
avg_psnr_bicubic += psnr_bicubic
f_input = f_bi/255.
@@ -177,7 +174,6 @@ for scale in scales:
f_sr = f_sr[0,:,:]
psnr_predicted = PSNR(f_sr, f_gt, shave_border=scale)
# psnr_predicted = PSNR_ver2(cv2.imread(f_gt), cv2.imread(f_sr))
avg_psnr_predicted += psnr_predicted
features.append(f_sr)
@@ -187,23 +183,3 @@ for scale in scales:
print("Dataset=", opt.dataset)
print("Average PSNR_predicted=", avg_psnr_predicted/count)
print("Average PSNR_bicubic=", avg_psnr_bicubic/count)
# Show graph
# f_gt = Image.fromarray(f_gt)
# f_b = Image.fromarray(f_bi)
# f_sr = Image.fromarray(f_sr)
# fig = plt.figure(figsize=(18, 16), dpi= 80)
# ax = plt.subplot("131")
# ax.imshow(f_gt)
# ax.set_title("GT")
# ax = plt.subplot("132")
# ax.imshow(f_bi)
# ax.set_title("Input(bicubic)")
# ax = plt.subplot("133")
# ax.imshow(f_sr)
# ax.set_title("Output(vdsr)")
# plt.show()
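The `PSNR` helper called above sits outside this hunk. A sketch consistent with the call sites `PSNR(pred, gt, shave_border=scale)`, modeled on the common VDSR evaluation code (an assumption, not this repository's definition):
import numpy as np

def PSNR_sketch(pred, gt, shave_border=0):
    # drop the border pixels that SR models reconstruct least reliably
    h, w = pred.shape[:2]
    pred = pred[shave_border:h - shave_border, shave_border:w - shave_border].astype(float)
    gt = gt[shave_border:h - shave_border, shave_border:w - shave_border].astype(float)
    rmse = np.sqrt(np.mean((pred - gt) ** 2))
    if rmse == 0:
        return float('inf')
    return 20 * np.log10(255.0 / rmse)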
@@ -13,7 +13,7 @@ from datasets import get_training_data_loader
# from feature_dataset import get_training_data_loader
# from make_dataset import make_dataset
import numpy as np
from dataFromH5 import Read_dataset_h5
# from dataFromH5 import Read_dataset_h5
import matplotlib.pyplot as plt
import math
import torch.utils.data as data
import torch
import h5py

class Read_dataset_h5(data.Dataset):
    """Reads paired 'input'/'label' arrays from an HDF5 file as float tensors."""
    def __init__(self, file_path):
        super(Read_dataset_h5, self).__init__()
        hf = h5py.File(file_path, 'r')  # open read-only; mode made explicit
        self.input = hf.get('input')
        self.label = hf.get('label')

    def __getitem__(self, index):
        return torch.from_numpy(self.input[index, :, :, :]).float(), \
               torch.from_numpy(self.label[index, :, :, :]).float()

    def __len__(self):
        return self.input.shape[0]
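A usage sketch for the class above (not part of this diff; the .h5 path is a hypothetical example following the naming used elsewhere in this commit):
from torch.utils.data import DataLoader

train_set = Read_dataset_h5('data/train_p2.h5')  # hypothetical path
train_loader = DataLoader(train_set, batch_size=16, shuffle=True)
for lr, hr in train_loader:
    print(lr.shape, hr.shape)  # (N, C, H, W) input/label pairs
    break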
'''
Currently unused.
'''
# from torch.utils.data import Dataset
# from PIL import Image
# import os
# from glob import glob
# from torchvision import transforms
# from torch.utils.data.dataset import Dataset
# import torch
# import pdb
# import math
# import numpy as np
# import cv2
# class FeatureDataset(Dataset):
# def __init__(self, data_path, datatype, rescale_factor, valid):
# self.data_path = data_path
# self.datatype = datatype
# self.rescale_factor = rescale_factor
# if not os.path.exists(data_path):
# raise Exception(f"[!] {self.data_path} not existed")
# if (valid):
# self.hr_path = os.path.join(self.data_path, 'valid')
# self.hr_path = os.path.join(self.hr_path, self.datatype)
# else:
# self.hr_path = os.path.join(self.data_path, 'LR_2')
# self.hr_path = os.path.join(self.hr_path, self.datatype)
# print(self.hr_path)
# self.names = os.listdir(self.hr_path)
# self.hr_path = sorted(glob(os.path.join(self.hr_path, "*.*")))
# self.hr_imgs = []
# w, h = Image.open(self.hr_path[0]).size
# self.width = int(w / 16)
# self.height = int(h / 16)
# self.lwidth = int(self.width / self.rescale_factor) # shrink by rescale_factor
# self.lheight = int(self.height / self.rescale_factor)
# print("lr: ({} {}), hr: ({} {})".format(self.lwidth, self.lheight, self.width, self.height))
# self.original_hr_imgs = [] # the 250 originals
# print("crop features ...")
# for hr in self.hr_path: # split into 256 feature tiles
# hr_cropped_imgs = []
# hr_image = Image.open(hr) # .convert('RGB')
# self.original_hr_imgs.append(np.array(hr_image).astype(float)) # keep the original
# for i in range(16):
# for j in range(16):
# (left, upper, right, lower) = (
# i * self.width, j * self.height, (i + 1) * self.width, (j + 1) * self.height)
# crop = hr_image.crop((left, upper, right, lower))
# hr_cropped_imgs.append(crop)
# self.hr_imgs.append(hr_cropped_imgs)
# self.final_results = [] # 250 entries
# print("resize and concat features ...")
# for i in range(0, len(self.hr_imgs)):
# hr_img = self.hr_imgs[i]
# interpolated_images = []
# for img in hr_img:
# image = img.resize((self.lwidth, self.lheight), Image.BICUBIC)
# image = image.resize((self.width, self.height), Image.BICUBIC)
# interpolated_images.append(np.array(image).astype(float))
# self.final_results.append(concatFeatures(interpolated_images, self.names[i], self.datatype))
# print(self.original_hr_imgs)
# print(self.final_results)
# def __getitem__(self, idx):
# ground_truth = self.original_hr_imgs[idx]
# final_result = self.final_results[idx] # list
# return transforms.ToTensor()(final_result), transforms.ToTensor()(ground_truth) # return the degraded hr_image and the untouched original as Tensors
# def __len__(self):
# return len(self.hr_path)
# def concatFeatures(features, image_name, feature_type):
# features_0 = features[:16]
# features_1 = features[16:32]
# features_2 = features[32:48]
# features_3 = features[48:64]
# features_4 = features[64:80]
# features_5 = features[80:96]
# features_6 = features[96:112]
# features_7 = features[112:128]
# features_8 = features[128:144]
# features_9 = features[144:160]
# features_10 = features[160:176]
# features_11 = features[176:192]
# features_12 = features[192:208]
# features_13 = features[208:224]
# features_14 = features[224:240]
# features_15 = features[240:256]
# features_new = list()
# features_new.extend([
# concat_vertical(features_0),
# concat_vertical(features_1),
# concat_vertical(features_2),
# concat_vertical(features_3),
# concat_vertical(features_4),
# concat_vertical(features_5),
# concat_vertical(features_6),
# concat_vertical(features_7),
# concat_vertical(features_8),
# concat_vertical(features_9),
# concat_vertical(features_10),
# concat_vertical(features_11),
# concat_vertical(features_12),
# concat_vertical(features_13),
# concat_vertical(features_14),
# concat_vertical(features_15)
# ])
# final_concat_feature = concat_horizontal(features_new)
# save_path = "features/LR_2/" + feature_type + "/" + image_name
# if not os.path.exists("features/"):
# os.makedirs("features/")
# if not os.path.exists("features/LR_2/"):
# os.makedirs("features/LR_2/")
# if not os.path.exists("features/LR_2/" + feature_type):
# os.makedirs("features/LR_2/" + feature_type)
# cv2.imwrite(save_path, final_concat_feature)
# return np.array(final_concat_feature).astype(float)
# def concat_horizontal(feature):
# result = cv2.hconcat([feature[0], feature[1]])
# for i in range(2, len(feature)):
# result = cv2.hconcat([result, feature[i]])
# return result
# def concat_vertical(feature):
# result = cv2.vconcat([feature[0], feature[1]])
# for i in range(2, len(feature)):
# result = cv2.vconcat([result, feature[i]])
# return result
# def get_data_loader_test_version(data_path, feature_type, rescale_factor, batch_size, num_workers):
# full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False)
# print("dataset의 사이즈는 {}".format(len(full_dataset)))
# for f in full_dataset:
# print(type(f))
# def get_data_loader(data_path, feature_type, rescale_factor, batch_size, num_workers):
# full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False)
# train_size = int(0.9 * len(full_dataset))
# test_size = len(full_dataset) - train_size
# train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
# torch.manual_seed(3334)
# train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True,
# num_workers=num_workers, pin_memory=False)
# test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False,
# num_workers=num_workers, pin_memory=True)
# return train_loader, test_loader
# def get_training_data_loader(data_path, feature_type, rescale_factor, batch_size, num_workers):
# full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False)
# torch.manual_seed(3334)
# train_loader = torch.utils.data.DataLoader(dataset=full_dataset, batch_size=batch_size, shuffle=True,
# num_workers=num_workers, pin_memory=False)
# return train_loader
# def get_infer_dataloader(data_path, feature_type, rescale_factor, batch_size, num_workers):
# dataset = FeatureDataset(data_path, feature_type, rescale_factor, True)
# data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False,
# num_workers=num_workers, pin_memory=False)
# return data_loader
\ No newline at end of file
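Distilled from the commented-out FeatureDataset above, a compact sketch of the degradation pipeline it implements (an assumption, not drop-in repository code): split a feature map into a 16x16 grid of tiles, bicubic down- then up-scale each tile, and stitch the grid back together.
import numpy as np
from PIL import Image

def degrade_feature_map(img, rescale_factor, grid=16):
    w, h = img.size
    tw, th = w // grid, h // grid
    rows = []
    for j in range(grid):
        row = []
        for i in range(grid):
            tile = img.crop((i * tw, j * th, (i + 1) * tw, (j + 1) * th))
            tile = tile.resize((tw // rescale_factor, th // rescale_factor), Image.BICUBIC)
            tile = tile.resize((tw, th), Image.BICUBIC)  # back to original tile size
            row.append(np.array(tile).astype(float))
        rows.append(np.hstack(row))
    return np.vstack(rows)  # full-size degraded feature map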
'''
Currently unused.
'''
# from crop_feature import crop_feature
# import os
# from PIL import Image
# import numpy as np
# import torch
# from torch.utils.data.dataset import Dataset
# from torch.utils.data import TensorDataset, DataLoader
# from torchvision import transforms
# import cv2
# import glob
# import h5py
# import argparse
# parser = argparse.ArgumentParser(description="make Dataset")
# parser.add_argument("--dataset", type=str)
# parser.add_argument("--featureType", type=str)
# parser.add_argument("--scaleFactor", type=int)
# parser.add_argument("--batchSize", type=int, default=16)
# parser.add_argument("--threads", type=int, default=3)
# # dataset, feature_type, scale_factor, batch_size, num_workers
# def main():
# opt = parser.parse_args()
# dataset = opt.dataset
# feature_type = opt.featureType
# scale_factor = opt.scaleFactor
# batch_size = opt.batchSize
# num_workers = opt.threads
# print_message = True
# dataset = dataset+"/LR_2"
# image_path = os.path.join(dataset, feature_type)
# image_list = os.listdir(image_path)
# input = list()
# label = list()
# for image in image_list:
# origin_image = Image.open(os.path.join(image_path,image))
# label.append(np.array(origin_image).astype(float))
# image_cropped = crop_feature(os.path.join(image_path, image), feature_type, scale_factor, print_message)
# print_message = False
# # bicubic interpolation
# reconstructed_features = list()
# print("crop is done.")
# for crop in image_cropped:
# w, h = crop.size
# bicubic_interpolated_image = crop.resize((w//scale_factor, h//scale_factor), Image.BICUBIC)
# bicubic_interpolated_image = bicubic_interpolated_image.resize((w,h), Image.BICUBIC) # upscale back to the original size
# reconstructed_features.append(np.array(bicubic_interpolated_image).astype(float))
# input.append(concatFeatures(reconstructed_features, image, feature_type))
# print("concat is done.")
# if len(input) == len(label):
# save_h5(input, label, 'data/train_{}.h5'.format(feature_type))
# print("saved..")
# else:
# print(len(input), len(label), "do not match.")
# def concatFeatures(features, image_name, feature_type):
# features_0 = features[:16]
# features_1 = features[16:32]
# features_2 = features[32:48]
# features_3 = features[48:64]
# features_4 = features[64:80]
# features_5 = features[80:96]
# features_6 = features[96:112]
# features_7 = features[112:128]
# features_8 = features[128:144]
# features_9 = features[144:160]
# features_10 = features[160:176]
# features_11 = features[176:192]
# features_12 = features[192:208]
# features_13 = features[208:224]
# features_14 = features[224:240]
# features_15 = features[240:256]
# features_new = list()
# features_new.extend([
# concat_vertical(features_0),
# concat_vertical(features_1),
# concat_vertical(features_2),
# concat_vertical(features_3),
# concat_vertical(features_4),
# concat_vertical(features_5),
# concat_vertical(features_6),
# concat_vertical(features_7),
# concat_vertical(features_8),
# concat_vertical(features_9),
# concat_vertical(features_10),
# concat_vertical(features_11),
# concat_vertical(features_12),
# concat_vertical(features_13),
# concat_vertical(features_14),
# concat_vertical(features_15)
# ])
# final_concat_feature = concat_horizontal(features_new)
# save_path = "features/LR_2/" + feature_type + "/" + image_name
# if not os.path.exists("features/"):
# os.makedirs("features/")
# if not os.path.exists("features/LR_2/"):
# os.makedirs("features/LR_2/")
# if not os.path.exists("features/LR_2/" + feature_type):
# os.makedirs("features/LR_2/" + feature_type)
# cv2.imwrite(save_path, final_concat_feature)
# return np.array(final_concat_feature).astype(float)
# def concat_horizontal(feature):
# result = cv2.hconcat([feature[0], feature[1]])
# for i in range(2, len(feature)):
# result = cv2.hconcat([result, feature[i]])
# return result
# def concat_vertical(feature):
# result = cv2.vconcat([feature[0], feature[1]])
# for i in range(2, len(feature)):
# result = cv2.vconcat([result, feature[i]])
# return result
# def save_h5(sub_ip, sub_la, savepath):
# if not os.path.exists("data/"):
# os.makedirs("data/")
# path = os.path.join(os.getcwd(), savepath)
# with h5py.File(path, 'w') as hf:
# hf.create_dataset('input', data=sub_ip)
# hf.create_dataset('label', data=sub_la)
# if __name__ == "__main__":
# main()
\ No newline at end of file
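A quick round-trip check (not part of this diff) for the file written by `save_h5` above; the path follows the `'data/train_{featureType}.h5'` pattern and is hypothetical.
import h5py

with h5py.File('data/train_p2.h5', 'r') as hf:
    inp, lab = hf['input'], hf['label']
    print(inp.shape, lab.shape)  # first dimensions should match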