Showing 20 changed files with 340 additions and 31 deletions
@@ -45,9 +45,8 @@ from pycocotools.mask import encode
 import argparse
 
 parser = argparse.ArgumentParser(description="PyTorch CARN")
-parser.add_argument("--data_path", type=str, default="/home/ubuntu/JH/exp1/dataset")
 parser.add_argument("--valid_data_path", type=str)
-parser.add_argument("--rescale_factor", type=int, default=4, help="rescale factor for use in training")
+parser.add_argument("--rescale_factor", type=int, help="rescale factor for use in training")
 parser.add_argument("--model_name", type=str, choices=["VDSR", "CARN", "SRRN", "FRGAN"], default='CARN', help="model type for use in training")
 parser.add_argument("--loss_type", type=str, choices=["MSE", "L1", "SmoothL1", "vgg_loss", "ssim_loss", "adv_loss", "lpips"], default='MSE', help="loss type in training")
 parser.add_argument('--batch_size', type=int, default=256)
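A side effect of this hunk worth noting: with `default=4` removed, `opt.rescale_factor` is `None` whenever the flag is omitted, and the Drive path built later in this diff expands to `inference_xNone`. A minimal sketch (illustrative, not part of the change) of the failure mode and a `required=True` alternative:

    import argparse

    parser = argparse.ArgumentParser(description="PyTorch CARN")
    # As in the diff: no default, so the attribute falls back to None.
    parser.add_argument("--rescale_factor", type=int,
                        help="rescale factor for use in training")
    opt = parser.parse_args([])  # simulate running without the flag

    # Path template used later in the diff; with None it becomes '.../inference_xNone/'
    print('/content/drive/MyDrive/result/inference_x{}/'.format(opt.rescale_factor))

    # A stricter alternative would fail fast instead (an assumption, not in the diff):
    # parser.add_argument("--rescale_factor", type=int, required=True)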
@@ -153,9 +152,12 @@ for iter in range(0, 100):
 
     globals()['maxRange_{}'.format(image_file_number)] = maxRange
 
-    # p2_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p2.png')
+    p2_feature_img = Image.open('/content/drive/MyDrive/result/inference_x{}/LR_2/p2/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p2' + '.png')
+    p3_feature_img = Image.open('/content/drive/MyDrive/result/inference_x{}/LR_2/p3/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p3' + '.png')
+    p4_feature_img = Image.open('/content/drive/MyDrive/result/inference_x{}/LR_2/p4/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p4' + '.png')
+    # p2_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p2.png')
     # p2_feature_img = Image.open('./result/{}/inference/{}_p2x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter)))
-    p2_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p2/' + image_prefix + '000000' + image_file_number + '_p2' + '.png')
+
     # # y_p2, cb, cr = p2_feature_img.split()
     p2_feature_arr = np.array(p2_feature_img)
     p2_feature_arr_round = myRound(p2_feature_arr)
@@ -163,14 +165,14 @@ for iter in range(0, 100):
     # p3_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p3.png')
 
     # p3_feature_img = Image.open('./result/{}/inference/{}_p3x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter)))
-    p3_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p3/' + image_prefix + '000000' + image_file_number + '_p3' + '.png')
+
     # # y_p3, cb2, cr2 = p3_feature_img.split()
     p3_feature_arr = np.array(p3_feature_img)
     p3_feature_arr_round = myRound(p3_feature_arr)
 
     # p4_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p4.png')
     # p4_feature_img = Image.open('./result/{}/inference/{}_p4x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter)))
-    p4_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p4/' + image_prefix + '000000' + image_file_number + '_p4' + '.png')
+
     # y_p4, cb3, cr3 = p4_feature_img.split()
     p4_feature_arr = np.array(p4_feature_img)
     p4_feature_arr_round = myRound(p4_feature_arr)
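The three hunks above replace the fixed `inference/LR_2` paths with per-scale `inference_x{rescale_factor}` directories, one per pyramid level (p2/p3/p4). A hypothetical helper (not in the diff) showing the path scheme the new lines build inline; the `COCO_val2014_` prefix is an assumption taken from the commented-out lines:

    def feature_path(level, rescale_factor, image_prefix, image_file_number,
                     root='/content/drive/MyDrive/result'):
        """Path of one super-resolved feature map, e.g. level='p2'."""
        return '{}/inference_x{}/LR_2/{}/{}000000{}_{}.png'.format(
            root, rescale_factor, level, image_prefix, image_file_number, level)

    # Example: feature_path('p2', 4, 'COCO_val2014_', '123456')
    # -> '/content/drive/MyDrive/result/inference_x4/LR_2/p2/COCO_val2014_000000123456_p2.png'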
code/vdsr/calculate_mAP_HR.py (new file, mode 100644; diff collapsed)
code/vdsr/calculate_mAP_bicubic.py (new file, mode 100644; diff collapsed)
code/vdsr/checkpoint/model_epoch_6_p2.pth (new file, mode 100644; binary, no preview)
@@ -117,9 +117,7 @@ model = torch.load(opt.model, map_location=lambda storage, loc: storage)["model"]
 
 scales = [opt.scaleFactor]
 
-# image_list = glob.glob(opt.dataset+"/*.*")
 if opt.singleImage == "Y":
-    # image_list = crop_feature(opt.dataset, opt.featureType, opt.scaleFactor)
     image_list = opt.dataset
 else:
     image_path = os.path.join(opt.dataset, opt.featureType)
@@ -150,7 +148,6 @@ for scale in scales:
     f_bi = f_bi.astype(float)
     features_bicubic.append(f_bi)
     psnr_bicubic = PSNR(f_bi, f_gt, shave_border=scale)
-    # psnr_bicubic = PSNR_ver2(cv2.imread(f_gt), cv2.imread(f_bi))
     avg_psnr_bicubic += psnr_bicubic
 
     f_input = f_bi/255.
@@ -177,7 +174,6 @@ for scale in scales:
     f_sr = f_sr[0,:,:]
 
     psnr_predicted = PSNR(f_sr, f_gt, shave_border=scale)
-    # psnr_predicted = PSNR_ver2(cv2.imread(f_gt), cv2.imread(f_sr))
     avg_psnr_predicted += psnr_predicted
     features.append(f_sr)
 
@@ -187,23 +183,3 @@ for scale in scales:
 print("Dataset=", opt.dataset)
 print("Average PSNR_predicted=", avg_psnr_predicted/count)
 print("Average PSNR_bicubic=", avg_psnr_bicubic/count)
-
-
-# Show graph
-# f_gt = Image.fromarray(f_gt)
-# f_b = Image.fromarray(f_bi)
-# f_sr = Image.fromarray(f_sr)
-
-# fig = plt.figure(figsize=(18, 16), dpi=80)
-# ax = plt.subplot("131")
-# ax.imshow(f_gt)
-# ax.set_title("GT")
-
-# ax = plt.subplot("132")
-# ax.imshow(f_bi)
-# ax.set_title("Input(bicubic)")
-
-# ax = plt.subplot("133")
-# ax.imshow(f_sr)
-# ax.set_title("Output(vdsr)")
-# plt.show()
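The eval hunks above keep calling `PSNR(..., shave_border=scale)`, whose definition sits outside this diff. For reference, a sketch of the usual VDSR-style helper with the same signature; treat it as a plausible reconstruction, not this repository's exact code:

    import numpy as np

    def PSNR(pred, gt, shave_border=0):
        """Shave-border PSNR sketch, assuming 2-D arrays in the 0..255 range."""
        height, width = pred.shape[:2]
        # Ignore a border of `shave_border` pixels on each side, as is common
        # when the scale factor leaves boundary artifacts.
        pred = pred[shave_border:height - shave_border,
                    shave_border:width - shave_border]
        gt = gt[shave_border:height - shave_border,
                shave_border:width - shave_border]
        rmse = np.sqrt(np.mean((pred - gt) ** 2))
        if rmse == 0:
            return 100.0  # identical crops
        return 20 * np.log10(255.0 / rmse)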
@@ -13,7 +13,7 @@ from datasets import get_training_data_loader
 # from feature_dataset import get_training_data_loader
 # from make_dataset import make_dataset
 import numpy as np
-from dataFromH5 import Read_dataset_h5
+# from dataFromH5 import Read_dataset_h5
 import matplotlib.pyplot as plt
 import math
 
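Since `dataFromH5.py` moves under `not_used/` in this change, commenting out the import is the minimal fix. A guarded import would be a hypothetical alternative (not what this diff does) if the module may come back:

    # Hypothetical alternative to deleting the import outright:
    try:
        from dataFromH5 import Read_dataset_h5  # only present when H5 data is used
    except ImportError:
        Read_dataset_h5 = None  # training falls back to get_training_data_loader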
code/vdsr/not_used/dataFromH5.py (new file, mode 100644)
+import torch.utils.data as data
+import torch
+import h5py
+
+class Read_dataset_h5(data.Dataset):
+    def __init__(self, file_path):
+        super(Read_dataset_h5, self).__init__()
+        hf = h5py.File(file_path, 'r')  # explicit read mode
+        self.input = hf.get('input')
+        self.label = hf.get('label')
+
+    def __getitem__(self, index):
+        return (torch.from_numpy(self.input[index, :, :, :]).float(),
+                torch.from_numpy(self.label[index, :, :, :]).float())
+
+    def __len__(self):
+        return self.input.shape[0]
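A short usage sketch for `Read_dataset_h5` above; the file name is hypothetical, assuming an HDF5 file laid out with `input` and `label` datasets of shape N x C x H x W, as `make_dataset.py` (below) writes them:

    from torch.utils.data import DataLoader

    # 'data/train_p2.h5' is a hypothetical path following the
    # 'data/train_{featureType}.h5' pattern used by make_dataset.py.
    dataset = Read_dataset_h5('data/train_p2.h5')
    loader = DataLoader(dataset, batch_size=256, shuffle=True)

    for lr_batch, hr_batch in loader:
        # each item is a float tensor pair (input, label), as __getitem__ returns
        print(lr_batch.shape, hr_batch.shape)
        break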
code/vdsr/not_used/feature_dataset.py (new file, mode 100644)
+'''
+Not currently used.
+'''
+
+# from torch.utils.data import Dataset
+# from PIL import Image
+# import os
+# from glob import glob
+# from torchvision import transforms
+# from torch.utils.data.dataset import Dataset
+# import torch
+# import pdb
+# import math
+# import numpy as np
+# import cv2
+
+
+# class FeatureDataset(Dataset):
+#     def __init__(self, data_path, datatype, rescale_factor, valid):
+#         self.data_path = data_path
+#         self.datatype = datatype
+#         self.rescale_factor = rescale_factor
+#         if not os.path.exists(data_path):
+#             raise Exception(f"[!] {self.data_path} does not exist")
+#         if (valid):
+#             self.hr_path = os.path.join(self.data_path, 'valid')
+#             self.hr_path = os.path.join(self.hr_path, self.datatype)
+#         else:
+#             self.hr_path = os.path.join(self.data_path, 'LR_2')
+#             self.hr_path = os.path.join(self.hr_path, self.datatype)
+#         print(self.hr_path)
+#         self.names = os.listdir(self.hr_path)
+#         self.hr_path = sorted(glob(os.path.join(self.hr_path, "*.*")))
+#         self.hr_imgs = []
+
+#         w, h = Image.open(self.hr_path[0]).size
+#         self.width = int(w / 16)
+#         self.height = int(h / 16)
+#         self.lwidth = int(self.width / self.rescale_factor)    # shrink by rescale_factor
+#         self.lheight = int(self.height / self.rescale_factor)
+#         print("lr: ({} {}), hr: ({} {})".format(self.lwidth, self.lheight, self.width, self.height))
+
+#         self.original_hr_imgs = []  # the 250 originals
+#         print("crop features ...")
+#         for hr in self.hr_path:  # split each canvas into 256 feature tiles
+#             hr_cropped_imgs = []
+#             hr_image = Image.open(hr)  # .convert('RGB')
+#             self.original_hr_imgs.append(np.array(hr_image).astype(float))  # keep the original
+#             for i in range(16):
+#                 for j in range(16):
+#                     (left, upper, right, lower) = (
+#                         i * self.width, j * self.height, (i + 1) * self.width, (j + 1) * self.height)
+#                     crop = hr_image.crop((left, upper, right, lower))
+#                     hr_cropped_imgs.append(crop)
+#             self.hr_imgs.append(hr_cropped_imgs)
+
+#         self.final_results = []  # [250 items]
+#         print("resize and concat features ...")
+#         for i in range(0, len(self.hr_imgs)):
+#             hr_img = self.hr_imgs[i]
+#             interpolated_images = []
+#             for img in hr_img:
+#                 image = img.resize((self.lwidth, self.lheight), Image.BICUBIC)
+#                 image = image.resize((self.width, self.height), Image.BICUBIC)
+#                 interpolated_images.append(np.array(image).astype(float))
+#             self.final_results.append(concatFeatures(interpolated_images, self.names[i], self.datatype))
+#         print(self.original_hr_imgs)
+#         print(self.final_results)
+
+#     def __getitem__(self, idx):
+#         ground_truth = self.original_hr_imgs[idx]
+#         final_result = self.final_results[idx]  # list
+#         return transforms.ToTensor()(final_result), transforms.ToTensor()(ground_truth)  # return the degraded hr_image and the untouched one, each as a Tensor
+
+#     def __len__(self):
+#         return len(self.hr_path)
+
+
+# def concatFeatures(features, image_name, feature_type):
+#     features_0 = features[:16]
+#     features_1 = features[16:32]
+#     features_2 = features[32:48]
+#     features_3 = features[48:64]
+#     features_4 = features[64:80]
+#     features_5 = features[80:96]
+#     features_6 = features[96:112]
+#     features_7 = features[112:128]
+#     features_8 = features[128:144]
+#     features_9 = features[144:160]
+#     features_10 = features[160:176]
+#     features_11 = features[176:192]
+#     features_12 = features[192:208]
+#     features_13 = features[208:224]
+#     features_14 = features[224:240]
+#     features_15 = features[240:256]
+
+#     features_new = list()
+#     features_new.extend([
+#         concat_vertical(features_0),
+#         concat_vertical(features_1),
+#         concat_vertical(features_2),
+#         concat_vertical(features_3),
+#         concat_vertical(features_4),
+#         concat_vertical(features_5),
+#         concat_vertical(features_6),
+#         concat_vertical(features_7),
+#         concat_vertical(features_8),
+#         concat_vertical(features_9),
+#         concat_vertical(features_10),
+#         concat_vertical(features_11),
+#         concat_vertical(features_12),
+#         concat_vertical(features_13),
+#         concat_vertical(features_14),
+#         concat_vertical(features_15)
+#     ])
+
+#     final_concat_feature = concat_horizontal(features_new)
+
+#     save_path = "features/LR_2/" + feature_type + "/" + image_name
+#     if not os.path.exists("features/"):
+#         os.makedirs("features/")
+#     if not os.path.exists("features/LR_2/"):
+#         os.makedirs("features/LR_2/")
+#     if not os.path.exists("features/LR_2/" + feature_type):
+#         os.makedirs("features/LR_2/" + feature_type)
+#     cv2.imwrite(save_path, final_concat_feature)
+
+#     return np.array(final_concat_feature).astype(float)
+
+# def concat_horizontal(feature):
+#     result = cv2.hconcat([feature[0], feature[1]])
+#     for i in range(2, len(feature)):
+#         result = cv2.hconcat([result, feature[i]])
+#     return result
+
+# def concat_vertical(feature):
+#     result = cv2.vconcat([feature[0], feature[1]])
+#     for i in range(2, len(feature)):
+#         result = cv2.vconcat([result, feature[i]])
+#     return result
+
+
+# def get_data_loader_test_version(data_path, feature_type, rescale_factor, batch_size, num_workers):
+#     full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False)
+#     print("dataset size is {}".format(len(full_dataset)))
+#     for f in full_dataset:
+#         print(type(f))
+
+
+# def get_data_loader(data_path, feature_type, rescale_factor, batch_size, num_workers):
+#     full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False)
+#     train_size = int(0.9 * len(full_dataset))
+#     test_size = len(full_dataset) - train_size
+#     train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])
+#     torch.manual_seed(3334)
+#     train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True,
+#                                                num_workers=num_workers, pin_memory=False)
+#     test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False,
+#                                               num_workers=num_workers, pin_memory=True)
+
+#     return train_loader, test_loader
+
+
+# def get_training_data_loader(data_path, feature_type, rescale_factor, batch_size, num_workers):
+#     full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False)
+#     torch.manual_seed(3334)
+#     train_loader = torch.utils.data.DataLoader(dataset=full_dataset, batch_size=batch_size, shuffle=True,
+#                                                num_workers=num_workers, pin_memory=False)
+#     return train_loader
+
+
+# def get_infer_dataloader(data_path, feature_type, rescale_factor, batch_size, num_workers):
+#     dataset = FeatureDataset(data_path, feature_type, rescale_factor, True)
+#     data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False,
+#                                               num_workers=num_workers, pin_memory=False)
+#     return data_loader
\ No newline at end of file
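Stripped of the tiling bookkeeping, the retired `FeatureDataset` above reduces each tile to a bicubic down/up round trip. A self-contained sketch of that core step on a synthetic tile (the tile size and factor are assumptions):

    import numpy as np
    from PIL import Image

    rescale_factor = 4  # plays the role of the script's --rescale_factor
    tile = Image.fromarray(np.random.randint(0, 256, (64, 64), dtype=np.uint8))

    w, h = tile.size
    low = tile.resize((w // rescale_factor, h // rescale_factor), Image.BICUBIC)
    degraded = low.resize((w, h), Image.BICUBIC)  # back to the original size

    # 'degraded' plays the network-input role; 'tile' is the label.
    print(np.array(degraded).shape, np.array(tile).shape)  # (64, 64) (64, 64)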
code/vdsr/not_used/make_dataset.py (new file, mode 100644)
+'''
+Not currently used.
+'''
+
+
+# from crop_feature import crop_feature
+# import os
+# from PIL import Image
+# import numpy as np
+# import torch
+# from torch.utils.data.dataset import Dataset
+# from torch.utils.data import TensorDataset, DataLoader
+# from torchvision import transforms
+# import cv2
+# import glob
+# import h5py
+# import argparse
+
+# parser = argparse.ArgumentParser(description="make Dataset")
+# parser.add_argument("--dataset", type=str)
+# parser.add_argument("--featureType", type=str)
+# parser.add_argument("--scaleFactor", type=int)
+# parser.add_argument("--batchSize", type=int, default=16)
+# parser.add_argument("--threads", type=int, default=3)
+
+# # dataset, feature_type, scale_factor, batch_size, num_workers
+# def main():
+#     opt = parser.parse_args()
+
+#     dataset = opt.dataset
+#     feature_type = opt.featureType
+#     scale_factor = opt.scaleFactor
+#     batch_size = opt.batchSize
+#     num_workers = opt.threads
+
+#     print_message = True
+#     dataset = dataset + "/LR_2"
+#     image_path = os.path.join(dataset, feature_type)
+#     image_list = os.listdir(image_path)
+#     input = list()
+#     label = list()
+
+#     for image in image_list:
+#         origin_image = Image.open(os.path.join(image_path, image))
+#         label.append(np.array(origin_image).astype(float))
+#         image_cropped = crop_feature(os.path.join(image_path, image), feature_type, scale_factor, print_message)
+#         print_message = False
+#         # bicubic interpolation
+#         reconstructed_features = list()
+#         print("crop is done.")
+#         for crop in image_cropped:
+#             w, h = crop.size
+#             bicubic_interpolated_image = crop.resize((w//scale_factor, h//scale_factor), Image.BICUBIC)
+#             bicubic_interpolated_image = bicubic_interpolated_image.resize((w, h), Image.BICUBIC)  # resize back up to the original size
+#             reconstructed_features.append(np.array(bicubic_interpolated_image).astype(float))
+#         input.append(concatFeatures(reconstructed_features, image, feature_type))
+
+#     print("concat is done.")
+#     if len(input) == len(label):
+#         save_h5(input, label, 'data/train_{}.h5'.format(feature_type))
+#         print("saved..")
+#     else:
+#         print(len(input), len(label), "differ.")
+
+
+# def concatFeatures(features, image_name, feature_type):
+#     features_0 = features[:16]
+#     features_1 = features[16:32]
+#     features_2 = features[32:48]
+#     features_3 = features[48:64]
+#     features_4 = features[64:80]
+#     features_5 = features[80:96]
+#     features_6 = features[96:112]
+#     features_7 = features[112:128]
+#     features_8 = features[128:144]
+#     features_9 = features[144:160]
+#     features_10 = features[160:176]
+#     features_11 = features[176:192]
+#     features_12 = features[192:208]
+#     features_13 = features[208:224]
+#     features_14 = features[224:240]
+#     features_15 = features[240:256]
+
+#     features_new = list()
+#     features_new.extend([
+#         concat_vertical(features_0),
+#         concat_vertical(features_1),
+#         concat_vertical(features_2),
+#         concat_vertical(features_3),
+#         concat_vertical(features_4),
+#         concat_vertical(features_5),
+#         concat_vertical(features_6),
+#         concat_vertical(features_7),
+#         concat_vertical(features_8),
+#         concat_vertical(features_9),
+#         concat_vertical(features_10),
+#         concat_vertical(features_11),
+#         concat_vertical(features_12),
+#         concat_vertical(features_13),
+#         concat_vertical(features_14),
+#         concat_vertical(features_15)
+#     ])
+
+#     final_concat_feature = concat_horizontal(features_new)
+
+#     save_path = "features/LR_2/" + feature_type + "/" + image_name
+#     if not os.path.exists("features/"):
+#         os.makedirs("features/")
+#     if not os.path.exists("features/LR_2/"):
+#         os.makedirs("features/LR_2/")
+#     if not os.path.exists("features/LR_2/" + feature_type):
+#         os.makedirs("features/LR_2/" + feature_type)
+#     cv2.imwrite(save_path, final_concat_feature)
+
+#     return np.array(final_concat_feature).astype(float)
+
+# def concat_horizontal(feature):
+#     result = cv2.hconcat([feature[0], feature[1]])
+#     for i in range(2, len(feature)):
+#         result = cv2.hconcat([result, feature[i]])
+#     return result
+
+# def concat_vertical(feature):
+#     result = cv2.vconcat([feature[0], feature[1]])
+#     for i in range(2, len(feature)):
+#         result = cv2.vconcat([result, feature[i]])
+#     return result
+
+# def save_h5(sub_ip, sub_la, savepath):
+#     if not os.path.exists("data/"):
+#         os.makedirs("data/")
+
+#     path = os.path.join(os.getcwd(), savepath)
+#     with h5py.File(path, 'w') as hf:
+#         hf.create_dataset('input', data=sub_ip)
+#         hf.create_dataset('label', data=sub_la)
+
+# if __name__ == "__main__":
+#     main()
\ No newline at end of file
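For context, the retired pipeline would pair `save_h5` here with `Read_dataset_h5` above. A minimal round-trip sketch with synthetic arrays and a hypothetical path, matching the `input`/`label` layout both files use:

    import h5py
    import numpy as np

    # Write a tiny input/label pair the way save_h5 does (hypothetical shapes:
    # N x C x H x W, which is what Read_dataset_h5.__getitem__ indexes).
    inputs = np.random.rand(8, 1, 64, 64)
    labels = np.random.rand(8, 1, 64, 64)
    with h5py.File('data_demo.h5', 'w') as hf:
        hf.create_dataset('input', data=inputs)
        hf.create_dataset('label', data=labels)

    # Read one sample back the way Read_dataset_h5 does.
    with h5py.File('data_demo.h5', 'r') as hf:
        x = hf.get('input')[0, :, :, :]
        y = hf.get('label')[0, :, :, :]
    print(x.shape, y.shape)  # (1, 64, 64) (1, 64, 64)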
code/vdsr/outputs/p2/HR_p2.png (new file, mode 100644; image, 7.26 MB)
code/vdsr/outputs/p2/LR_x3_p2.png (new file, mode 100644; image, 4.71 MB)
code/vdsr/outputs/p2/SR_x3_p2.png (new file, mode 100644; image, 5.12 MB)
code/vdsr/outputs/p3/HR_p3.png (new file, mode 100644; image, 1.83 MB)
code/vdsr/outputs/p3/LR_x3_p3.png (new file, mode 100644; image, 1.41 MB)
code/vdsr/outputs/p3/SR_x3_p3.png (new file, mode 100644; image, 1.41 MB)
code/vdsr/outputs/p4/HR_p4.png (new file, mode 100644; image, 466 KB)
code/vdsr/outputs/p4/LR_x3_p4.png (new file, mode 100644; image, 363 KB)
code/vdsr/outputs/p4/SR_x3_p4.png (new file, mode 100644; image, 363 KB)
최종보고서/발표ppt_2017103084_서민정.pptx (new file, mode 100644; binary, no preview) ["최종보고서/발표ppt" = final report / presentation slides]
최종보고서/최종보고서.docx (new file, mode 100644; binary, no preview) ["최종보고서" = final report]