Showing 20 changed files with 1098 additions and 31 deletions
... | @@ -45,9 +45,8 @@ from pycocotools.mask import encode | ... | @@ -45,9 +45,8 @@ from pycocotools.mask import encode |
45 | import argparse | 45 | import argparse |
46 | 46 | ||
47 | parser = argparse.ArgumentParser(description="PyTorch CARN") | 47 | parser = argparse.ArgumentParser(description="PyTorch CARN") |
48 | -parser.add_argument("--data_path", type=str, default = "/home/ubuntu/JH/exp1/dataset") | ||
49 | parser.add_argument("--valid_data_path", type=str) | 48 | parser.add_argument("--valid_data_path", type=str) |
50 | -parser.add_argument("--rescale_factor", type=int, default=4, help="rescale factor for using in training") | 49 | +parser.add_argument("--rescale_factor", type=int, help="rescale factor for using in training") |
51 | parser.add_argument("--model_name", type=str,choices= ["VDSR", "CARN", "SRRN","FRGAN"], default='CARN', help="Feature type for usingin training") | 50 | parser.add_argument("--model_name", type=str,choices= ["VDSR", "CARN", "SRRN","FRGAN"], default='CARN', help="Feature type for usingin training") |
52 | parser.add_argument("--loss_type", type=str, choices= ["MSE", "L1", "SmoothL1","vgg_loss","ssim_loss","adv_loss","lpips"], default='MSE', help="loss type in training") | 51 | parser.add_argument("--loss_type", type=str, choices= ["MSE", "L1", "SmoothL1","vgg_loss","ssim_loss","adv_loss","lpips"], default='MSE', help="loss type in training") |
53 | parser.add_argument('--batch_size', type=int, default=256) | 52 | parser.add_argument('--batch_size', type=int, default=256) |
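Note that this hunk drops the default for --rescale_factor while the flag is still interpolated into the result paths further down, so an unset value would silently produce paths like .../inference_xNone/. A minimal sketch of a stricter declaration (marking the flag required is an assumption of this note, not part of the change):

parser.add_argument("--rescale_factor", type=int, required=True,
                    help="rescale factor used in training; interpolated into the feature-image paths")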
... | @@ -153,9 +152,12 @@ for iter in range(0, 100): | ... | @@ -153,9 +152,12 @@ for iter in range(0, 100): |
153 | 152 | ||
154 | globals()['maxRange_{}'.format(image_file_number)] = maxRange | 153 | globals()['maxRange_{}'.format(image_file_number)] = maxRange |
155 | 154 | ||
156 | - # p2_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p2.png' | 155 | + p2_feature_img = Image.open('/content/drive/MyDrive/result/inference_x{}/LR_2/p2/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p2' +'.png') |
156 | + p3_feature_img = Image.open('/content/drive/MyDrive/result/inference_x{}/LR_2/p3/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p3' +'.png') | ||
157 | + p4_feature_img = Image.open('/content/drive/MyDrive/result/inference_x{}/LR_2/p4/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p4' +'.png') | ||
158 | +# p2_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p2.png' | ||
157 | # p2_feature_img = Image.open('./result/{}/inference/{}_p2x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) | 159 | # p2_feature_img = Image.open('./result/{}/inference/{}_p2x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) |
158 | - p2_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p2/' + image_prefix + '000000' + image_file_number + '_p2' +'.png') | 160 | + |
159 | # # y_p2, cb, cr = p2_feature_img.split() | 161 | # # y_p2, cb, cr = p2_feature_img.split() |
160 | p2_feature_arr = np.array(p2_feature_img) | 162 | p2_feature_arr = np.array(p2_feature_img) |
161 | p2_feature_arr_round = myRound(p2_feature_arr) | 163 | p2_feature_arr_round = myRound(p2_feature_arr) |
... | @@ -163,14 +165,14 @@ for iter in range(0, 100): | ... | @@ -163,14 +165,14 @@ for iter in range(0, 100): |
163 | # p3_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p3.png') | 165 | # p3_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p3.png') |
164 | 166 | ||
165 | # p3_feature_img = Image.open('./result/{}/inference/{}_p3x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) | 167 | # p3_feature_img = Image.open('./result/{}/inference/{}_p3x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) |
166 | - p3_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p3/' + image_prefix + '000000' + image_file_number + '_p3' +'.png') | 168 | + |
167 | # # y_p3, cb2, cr2 = p3_feature_img.split() | 169 | # # y_p3, cb2, cr2 = p3_feature_img.split() |
168 | p3_feature_arr = np.array(p3_feature_img) | 170 | p3_feature_arr = np.array(p3_feature_img) |
169 | p3_feature_arr_round = myRound(p3_feature_arr) | 171 | p3_feature_arr_round = myRound(p3_feature_arr) |
170 | 172 | ||
171 | # p4_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p4.png') | 173 | # p4_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p4.png') |
172 | # p4_feature_img = Image.open('./result/{}/inference/{}_p4x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) | 174 | # p4_feature_img = Image.open('./result/{}/inference/{}_p4x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) |
173 | - p4_feature_img = Image.open('/content/drive/MyDrive/result/inference/LR_2/p4/' + image_prefix + '000000' + image_file_number + '_p4' +'.png') | 175 | + |
174 | # y_p4, cb3, cr3 = p4_feature_img.split() | 176 | # y_p4, cb3, cr3 = p4_feature_img.split() |
175 | p4_feature_arr = np.array(p4_feature_img) | 177 | p4_feature_arr = np.array(p4_feature_img) |
176 | p4_feature_arr_round = myRound(p4_feature_arr) | 178 | p4_feature_arr_round = myRound(p4_feature_arr) | ... | ... |
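The replacement paths above are hard-coded to a Colab Drive mount (/content/drive/MyDrive/result/...). A hedged alternative is to expose the base directory as a CLI option so the script also runs outside Colab; the --feature_root flag and feature_image_path helper below are hypothetical and not part of this PR:

import os

parser.add_argument("--feature_root", type=str, default="/content/drive/MyDrive/result",
                    help="base directory containing the exported feature PNGs (hypothetical flag)")

def feature_image_path(root, rescale_factor, prefix, file_number, level):
    # e.g. <root>/inference_x4/LR_2/p2/COCO_val2017_000000001000_p2.png
    return os.path.join(root, "inference_x{}".format(rescale_factor), "LR_2", level,
                        "{}000000{}_{}.png".format(prefix, file_number, level))

Usage would then be, for example, Image.open(feature_image_path(opt.feature_root, opt.rescale_factor, image_prefix, image_file_number, "p2")).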
code/vdsr/calculate_mAP_HR.py
0 → 100644
1 | +# # # [1] | ||
2 | +# # # install dependencies: | ||
3 | +# !pip install pyyaml==5.1 | ||
4 | +# import torch, torchvision | ||
5 | +# print(torch.__version__, torch.cuda.is_available()) | ||
6 | +# !gcc --version | ||
7 | +# # opencv is pre-installed on colab | ||
8 | + | ||
9 | +# # # [2] | ||
10 | +# # # install detectron2: (Colab has CUDA 10.1 + torch 1.8) | ||
11 | +# # # See https://detectron2.readthedocs.io/tutorials/install.html for instructions | ||
12 | +# import torch | ||
13 | +# assert torch.__version__.startswith("1.8") # need to manually install torch 1.8 if Colab changes its default version | ||
14 | +# !pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html | ||
15 | +# # exit(0) # After installation, you need to "restart runtime" in Colab. This line can also restart runtime | ||
16 | + | ||
17 | +# # # [3] | ||
18 | +# # # Some basic setup: | ||
19 | +# # # Setup detectron2 logger | ||
20 | +# import detectron2 | ||
21 | +# from detectron2.utils.logger import setup_logger | ||
22 | +# setup_logger() | ||
23 | + | ||
24 | +# import some common libraries | ||
25 | +import torch | ||
26 | +import numpy as np | ||
27 | +import os, json, cv2, random, math | ||
28 | +from PIL import Image | ||
29 | +from torch.nn.utils.rnn import pad_sequence | ||
30 | + | ||
31 | +# import some common detectron2 utilities | ||
32 | +from detectron2 import model_zoo | ||
33 | +from detectron2.engine import DefaultPredictor | ||
34 | +from detectron2.config import get_cfg | ||
35 | +from detectron2.utils.visualizer import Visualizer | ||
36 | +from detectron2.data import MetadataCatalog, DatasetCatalog | ||
37 | +from detectron2.modeling import build_model, build_backbone | ||
38 | +from detectron2.checkpoint import DetectionCheckpointer | ||
39 | +from detectron2.utils.visualizer import Visualizer | ||
40 | +import detectron2.data.transforms as T | ||
41 | + | ||
42 | +from pycocotools.coco import COCO | ||
43 | +from pycocotools.cocoeval import COCOeval | ||
44 | +from pycocotools.mask import encode | ||
45 | +import argparse | ||
46 | + | ||
47 | +parser = argparse.ArgumentParser(description="PyTorch CARN") | ||
48 | +parser.add_argument("--valid_data_path", type=str) | ||
49 | +parser.add_argument("--rescale_factor", type=int, default=4, help="rescale factor for using in training") | ||
50 | +parser.add_argument("--model_name", type=str,choices= ["VDSR", "CARN", "SRRN","FRGAN"], default='CARN', help="Feature type for usingin training") | ||
51 | +parser.add_argument("--loss_type", type=str, choices= ["MSE", "L1", "SmoothL1","vgg_loss","ssim_loss","adv_loss","lpips"], default='MSE', help="loss type in training") | ||
52 | +parser.add_argument('--batch_size', type=int, default=256) | ||
53 | +opt = parser.parse_args() | ||
54 | +print(opt) | ||
55 | + | ||
56 | + | ||
57 | +def myRound(x): # round half away from zero (symmetric about 0 for positive and negative values) | ||
58 | + abs_x = abs(x) | ||
59 | + val = np.int16(abs_x + 0.5) | ||
60 | + val2 = np.choose( | ||
61 | + x < 0, | ||
62 | + [ | ||
63 | + val, val*(-1) | ||
64 | + ] | ||
65 | + ) | ||
66 | + return val2 | ||
67 | + | ||
68 | +def myClip(x, maxV): | ||
69 | + val = np.choose( | ||
70 | + x > maxV, | ||
71 | + [ | ||
72 | + x, maxV | ||
73 | + ] | ||
74 | + ) | ||
75 | + return val | ||
76 | + | ||
77 | +image_idx = 0 | ||
78 | +cfg = get_cfg() | ||
79 | +# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library | ||
80 | +cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")) | ||
81 | +cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model | ||
82 | +# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well | ||
83 | +cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") | ||
84 | + | ||
85 | +model = build_model(cfg) | ||
86 | +DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) | ||
87 | +model.eval() | ||
88 | + | ||
89 | +image_idx = 0 | ||
90 | +anns = 0 | ||
91 | + | ||
92 | +# Original_8bit | ||
93 | + | ||
94 | +image_files = ['001000', '002153', '008021', '009769', '009891', '015335', '017627', '018150', '018837', '022589'] | ||
95 | +image_files.extend(['022935', '023230', '024610', '025560', '025593', '027620', '155341', '161397', '165336', '166287']) | ||
96 | +image_files.extend(['166642', '169996', '172330', '172648', '176606', '176701', '179765', '180101', '186296', '250758']) | ||
97 | +image_files.extend(['259382', '267191', '287545', '287649', '289741', '293245', '308328', '309452', '335529', '337987']) | ||
98 | +image_files.extend(['338625', '344029', '350122', '389933', '393226', '395343', '395633', '401862', '402473', '402992']) | ||
99 | +image_files.extend(['404568', '406997', '408112', '410650', '414385', '414795', '415194', '415536', '416104', '416758']) | ||
100 | +image_files.extend(['427055', '428562', '430073', '433204', '447200', '447313', '448448', '452321', '453001', '458755']) | ||
101 | +image_files.extend(['462904', '463522', '464089', '468965', '469192', '469246', '471450', '474078', '474881', '475678']) | ||
102 | +image_files.extend(['475779', '537802', '542625', '543043', '543300', '543528', '547502', '550691', '553669', '567740']) | ||
103 | +image_files.extend(['570688', '570834', '571943', '573391', '574315', '575372', '575970', '578093', '579158', '581100']) | ||
104 | + | ||
105 | + | ||
106 | +for iter in range(0, 100): | ||
107 | + | ||
108 | + image_file_number = image_files[image_idx] | ||
109 | + aug = T.ResizeShortestEdge( | ||
110 | + # [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST | ||
111 | + # [480, 480], cfg.INPUT.MAX_SIZE_TEST | ||
112 | + [768, 768], cfg.INPUT.MAX_SIZE_TEST | ||
113 | + ) | ||
114 | + image_prefix = "COCO_val2017_" | ||
115 | + image = cv2.imread(opt.valid_data_path + '000000'+ image_file_number +'.jpg') | ||
116 | + # image = cv2.imread('./dataset/validset_100/000000'+ image_file_number +'.jpg') | ||
117 | + height, width = image.shape[:2] | ||
118 | + image = aug.get_transform(image).apply_image(image) | ||
119 | + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) | ||
120 | + inputs = [{"image": image, "height": height, "width": width}] | ||
121 | + with torch.no_grad(): | ||
122 | + images = model.preprocess_image(inputs) # don't forget to preprocess | ||
123 | + features = model.backbone(images.tensor) # set of cnn features | ||
124 | + | ||
125 | + | ||
126 | + p2_feature_original = features['p2'].to("cpu") | ||
127 | + p3_feature_original = features['p3'].to("cpu") | ||
128 | + p4_feature_original = features['p4'].to("cpu") | ||
129 | + | ||
130 | + bitDepth = 8 | ||
131 | + maxRange = [0, 0, 0, 0, 0] | ||
132 | + | ||
133 | + def maxVal(x): | ||
134 | + return pow(2, x) | ||
135 | + def offsetVal(x): | ||
136 | + return pow(2, x-1) | ||
137 | + | ||
138 | + def maxRange_layer(x): | ||
139 | + absolute_arr = torch.abs(x) * 2 | ||
140 | + max_arr = torch.max(absolute_arr) | ||
141 | + return torch.ceil(max_arr) | ||
142 | + | ||
143 | + | ||
144 | + act2 = p2_feature_original.squeeze() | ||
145 | + maxRange[0] = maxRange_layer(act2) | ||
146 | + | ||
147 | + act3 = p3_feature_original.squeeze() | ||
148 | + maxRange[1] = maxRange_layer(act3) | ||
149 | + | ||
150 | + act4 = p4_feature_original.squeeze() | ||
151 | + maxRange[2] = maxRange_layer(act4) | ||
152 | + | ||
153 | + globals()['maxRange_{}'.format(image_file_number)] = maxRange | ||
154 | + | ||
155 | + p2_feature_img = Image.open('/content/drive/MyDrive/validset_features/features/LR_1_2/p2/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p2' +'.png') | ||
156 | + p3_feature_img = Image.open('/content/drive/MyDrive/validset_features/features/LR_1_2/p3/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p3' +'.png') | ||
157 | + p4_feature_img = Image.open('/content/drive/MyDrive/validset_features/features/LR_1_2/p4/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p4' +'.png') | ||
158 | + # p2_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p2.png' | ||
159 | + # p2_feature_img = Image.open('./result/{}/inference/{}_p2x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) | ||
160 | + | ||
161 | + # # y_p2, cb, cr = p2_feature_img.split() | ||
162 | + p2_feature_arr = np.array(p2_feature_img) | ||
163 | + p2_feature_arr_round = myRound(p2_feature_arr) | ||
164 | + | ||
165 | + # p3_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p3.png') | ||
166 | + | ||
167 | + # p3_feature_img = Image.open('./result/{}/inference/{}_p3x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) | ||
168 | + | ||
169 | + # # y_p3, cb2, cr2 = p3_feature_img.split() | ||
170 | + p3_feature_arr = np.array(p3_feature_img) | ||
171 | + p3_feature_arr_round = myRound(p3_feature_arr) | ||
172 | + | ||
173 | + # p4_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p4.png') | ||
174 | + # p4_feature_img = Image.open('./result/{}/inference/{}_p4x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) | ||
175 | + | ||
176 | + # y_p4, cb3, cr3 = p4_feature_img.split() | ||
177 | + p4_feature_arr = np.array(p4_feature_img) | ||
178 | + p4_feature_arr_round = myRound(p4_feature_arr) | ||
179 | + | ||
180 | + | ||
181 | + # Reconstruction: dequantize the 8-bit feature PNGs back to feature-map values | ||
182 | + recon_p2 = (((p2_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[0].numpy()) | ||
183 | + recon_p3 = (((p3_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[1].numpy()) | ||
184 | + recon_p4 = (((p4_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[2].numpy()) | ||
185 | + | ||
186 | + tensor_value = recon_p2 | ||
187 | + tensor_value2 = recon_p3 | ||
188 | + tensor_value3 = recon_p4 | ||
189 | + | ||
190 | + # # end of MSB code | ||
191 | + | ||
192 | + # LSB and original code | ||
193 | + # reconstruction | ||
194 | + # recon_p2 = (((p2_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[0].numpy()) | ||
195 | + # recon_p3 = (((p3_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[1].numpy()) | ||
196 | + # recon_p4 = (((p4_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[2].numpy()) | ||
197 | + # recon_p5 = (((p5_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[3].numpy()) | ||
198 | + # recon_p6 = (((p6_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[4].numpy()) | ||
199 | + | ||
200 | + tensor_value = torch.as_tensor(recon_p2.astype("float32")) | ||
201 | + tensor_value2 = torch.as_tensor(recon_p3.astype("float32")) | ||
202 | + tensor_value3 = torch.as_tensor(recon_p4.astype("float32")) | ||
203 | + # end of LSB and original code | ||
204 | + | ||
205 | + t = [None] * 16 | ||
206 | + t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8], t[9], t[10], t[11], t[12], t[13], t[14], t[15] = torch.chunk(tensor_value, 16, dim=0) | ||
207 | + p2 = [None] * 256 | ||
208 | + | ||
209 | + t2 = [None] * 16 | ||
210 | + t2[0], t2[1], t2[2], t2[3], t2[4], t2[5], t2[6], t2[7], t2[8], t2[9], t2[10], t2[11], t2[12], t2[13], t2[14], t2[15] = torch.chunk(tensor_value2, 16, dim=0) | ||
211 | + p3 = [None] * 256 | ||
212 | + | ||
213 | + t3 = [None] * 16 | ||
214 | + t3[0], t3[1], t3[2], t3[3], t3[4], t3[5], t3[6], t3[7], t3[8], t3[9], t3[10], t3[11], t3[12], t3[13], t3[14], t3[15] = torch.chunk(tensor_value3, 16, dim=0) | ||
215 | + p4 = [None] * 256 | ||
216 | + | ||
217 | + p2[0], p2[1], p2[2], p2[3], p2[4], p2[5], p2[6], p2[7], p2[8], p2[9], p2[10], p2[11], p2[12], p2[13], p2[14], p2[15] = torch.chunk(t[0], 16, dim=1) | ||
218 | + p2[16], p2[17], p2[18], p2[19], p2[20], p2[21], p2[22], p2[23], p2[24], p2[25], p2[26], p2[27], p2[28], p2[29], p2[30], p2[31] = torch.chunk(t[1], 16, dim=1) | ||
219 | + p2[32], p2[33], p2[34], p2[35], p2[36], p2[37], p2[38], p2[39], p2[40], p2[41], p2[42], p2[43], p2[44], p2[45], p2[46], p2[47] = torch.chunk(t[2], 16, dim=1) | ||
220 | + p2[48], p2[49], p2[50], p2[51], p2[52], p2[53], p2[54], p2[55], p2[56], p2[57], p2[58], p2[59], p2[60], p2[61], p2[62], p2[63] = torch.chunk(t[3], 16, dim=1) | ||
221 | + p2[64], p2[65], p2[66], p2[67], p2[68], p2[69], p2[70], p2[71], p2[72], p2[73], p2[74], p2[75], p2[76], p2[77], p2[78], p2[79] = torch.chunk(t[4], 16, dim=1) | ||
222 | + p2[80], p2[81], p2[82], p2[83], p2[84], p2[85], p2[86], p2[87], p2[88], p2[89], p2[90], p2[91], p2[92], p2[93], p2[94], p2[95] = torch.chunk(t[5], 16, dim=1) | ||
223 | + p2[96], p2[97], p2[98], p2[99], p2[100], p2[101], p2[102], p2[103], p2[104], p2[105], p2[106], p2[107], p2[108], p2[109], p2[110], p2[111] = torch.chunk(t[6], 16, dim=1) | ||
224 | + p2[112], p2[113], p2[114], p2[115], p2[116], p2[117], p2[118], p2[119], p2[120], p2[121], p2[122], p2[123], p2[124], p2[125], p2[126], p2[127] = torch.chunk(t[7], 16, dim=1) | ||
225 | + p2[128], p2[129], p2[130], p2[131], p2[132], p2[133], p2[134], p2[135], p2[136], p2[137], p2[138], p2[139], p2[140], p2[141], p2[142], p2[143] = torch.chunk(t[8], 16, dim=1) | ||
226 | + p2[144], p2[145], p2[146], p2[147], p2[148], p2[149], p2[150], p2[151], p2[152], p2[153], p2[154], p2[155], p2[156], p2[157], p2[158], p2[159] = torch.chunk(t[9], 16, dim=1) | ||
227 | + p2[160], p2[161], p2[162], p2[163], p2[164], p2[165], p2[166], p2[167], p2[168], p2[169], p2[170], p2[171], p2[172], p2[173], p2[174], p2[175] = torch.chunk(t[10], 16, dim=1) | ||
228 | + p2[176], p2[177], p2[178], p2[179], p2[180], p2[181], p2[182], p2[183], p2[184], p2[185], p2[186], p2[187], p2[188], p2[189], p2[190], p2[191] = torch.chunk(t[11], 16, dim=1) | ||
229 | + p2[192], p2[193], p2[194], p2[195], p2[196], p2[197], p2[198], p2[199], p2[200], p2[201], p2[202], p2[203], p2[204], p2[205], p2[206], p2[207] = torch.chunk(t[12], 16, dim=1) | ||
230 | + p2[208], p2[209], p2[210], p2[211], p2[212], p2[213], p2[214], p2[215], p2[216], p2[217], p2[218], p2[219], p2[220], p2[221], p2[222], p2[223] = torch.chunk(t[13], 16, dim=1) | ||
231 | + p2[224], p2[225], p2[226], p2[227], p2[228], p2[229], p2[230], p2[231], p2[232], p2[233], p2[234], p2[235], p2[236], p2[237], p2[238], p2[239] = torch.chunk(t[14], 16, dim=1) | ||
232 | + p2[240], p2[241], p2[242], p2[243], p2[244], p2[245], p2[246], p2[247], p2[248], p2[249], p2[250], p2[251], p2[252], p2[253], p2[254], p2[255] = torch.chunk(t[15], 16, dim=1) | ||
233 | + | ||
234 | + p3[0], p3[1], p3[2], p3[3], p3[4], p3[5], p3[6], p3[7], p3[8], p3[9], p3[10], p3[11], p3[12], p3[13], p3[14], p3[15] = torch.chunk(t2[0], 16, dim=1) | ||
235 | + p3[16], p3[17], p3[18], p3[19], p3[20], p3[21], p3[22], p3[23], p3[24], p3[25], p3[26], p3[27], p3[28], p3[29], p3[30], p3[31] = torch.chunk(t2[1], 16, dim=1) | ||
236 | + p3[32], p3[33], p3[34], p3[35], p3[36], p3[37], p3[38], p3[39], p3[40], p3[41], p3[42], p3[43], p3[44], p3[45], p3[46], p3[47] = torch.chunk(t2[2], 16, dim=1) | ||
237 | + p3[48], p3[49], p3[50], p3[51], p3[52], p3[53], p3[54], p3[55], p3[56], p3[57], p3[58], p3[59], p3[60], p3[61], p3[62], p3[63] = torch.chunk(t2[3], 16, dim=1) | ||
238 | + p3[64], p3[65], p3[66], p3[67], p3[68], p3[69], p3[70], p3[71], p3[72], p3[73], p3[74], p3[75], p3[76], p3[77], p3[78], p3[79] = torch.chunk(t2[4], 16, dim=1) | ||
239 | + p3[80], p3[81], p3[82], p3[83], p3[84], p3[85], p3[86], p3[87], p3[88], p3[89], p3[90], p3[91], p3[92], p3[93], p3[94], p3[95] = torch.chunk(t2[5], 16, dim=1) | ||
240 | + p3[96], p3[97], p3[98], p3[99], p3[100], p3[101], p3[102], p3[103], p3[104], p3[105], p3[106], p3[107], p3[108], p3[109], p3[110], p3[111] = torch.chunk(t2[6], 16, dim=1) | ||
241 | + p3[112], p3[113], p3[114], p3[115], p3[116], p3[117], p3[118], p3[119], p3[120], p3[121], p3[122], p3[123], p3[124], p3[125], p3[126], p3[127] = torch.chunk(t2[7], 16, dim=1) | ||
242 | + p3[128], p3[129], p3[130], p3[131], p3[132], p3[133], p3[134], p3[135], p3[136], p3[137], p3[138], p3[139], p3[140], p3[141], p3[142], p3[143] = torch.chunk(t2[8], 16, dim=1) | ||
243 | + p3[144], p3[145], p3[146], p3[147], p3[148], p3[149], p3[150], p3[151], p3[152], p3[153], p3[154], p3[155], p3[156], p3[157], p3[158], p3[159] = torch.chunk(t2[9], 16, dim=1) | ||
244 | + p3[160], p3[161], p3[162], p3[163], p3[164], p3[165], p3[166], p3[167], p3[168], p3[169], p3[170], p3[171], p3[172], p3[173], p3[174], p3[175] = torch.chunk(t2[10], 16, dim=1) | ||
245 | + p3[176], p3[177], p3[178], p3[179], p3[180], p3[181], p3[182], p3[183], p3[184], p3[185], p3[186], p3[187], p3[188], p3[189], p3[190], p3[191] = torch.chunk(t2[11], 16, dim=1) | ||
246 | + p3[192], p3[193], p3[194], p3[195], p3[196], p3[197], p3[198], p3[199], p3[200], p3[201], p3[202], p3[203], p3[204], p3[205], p3[206], p3[207] = torch.chunk(t2[12], 16, dim=1) | ||
247 | + p3[208], p3[209], p3[210], p3[211], p3[212], p3[213], p3[214], p3[215], p3[216], p3[217], p3[218], p3[219], p3[220], p3[221], p3[222], p3[223] = torch.chunk(t2[13], 16, dim=1) | ||
248 | + p3[224], p3[225], p3[226], p3[227], p3[228], p3[229], p3[230], p3[231], p3[232], p3[233], p3[234], p3[235], p3[236], p3[237], p3[238], p3[239] = torch.chunk(t2[14], 16, dim=1) | ||
249 | + p3[240], p3[241], p3[242], p3[243], p3[244], p3[245], p3[246], p3[247], p3[248], p3[249], p3[250], p3[251], p3[252], p3[253], p3[254], p3[255] = torch.chunk(t2[15], 16, dim=1) | ||
250 | + | ||
251 | + p4[0], p4[1], p4[2], p4[3], p4[4], p4[5], p4[6], p4[7], p4[8], p4[9], p4[10], p4[11], p4[12], p4[13], p4[14], p4[15] = torch.chunk(t3[0], 16, dim=1) | ||
252 | + p4[16], p4[17], p4[18], p4[19], p4[20], p4[21], p4[22], p4[23], p4[24], p4[25], p4[26], p4[27], p4[28], p4[29], p4[30], p4[31] = torch.chunk(t3[1], 16, dim=1) | ||
253 | + p4[32], p4[33], p4[34], p4[35], p4[36], p4[37], p4[38], p4[39], p4[40], p4[41], p4[42], p4[43], p4[44], p4[45], p4[46], p4[47] = torch.chunk(t3[2], 16, dim=1) | ||
254 | + p4[48], p4[49], p4[50], p4[51], p4[52], p4[53], p4[54], p4[55], p4[56], p4[57], p4[58], p4[59], p4[60], p4[61], p4[62], p4[63] = torch.chunk(t3[3], 16, dim=1) | ||
255 | + p4[64], p4[65], p4[66], p4[67], p4[68], p4[69], p4[70], p4[71], p4[72], p4[73], p4[74], p4[75], p4[76], p4[77], p4[78], p4[79] = torch.chunk(t3[4], 16, dim=1) | ||
256 | + p4[80], p4[81], p4[82], p4[83], p4[84], p4[85], p4[86], p4[87], p4[88], p4[89], p4[90], p4[91], p4[92], p4[93], p4[94], p4[95] = torch.chunk(t3[5], 16, dim=1) | ||
257 | + p4[96], p4[97], p4[98], p4[99], p4[100], p4[101], p4[102], p4[103], p4[104], p4[105], p4[106], p4[107], p4[108], p4[109], p4[110], p4[111] = torch.chunk(t3[6], 16, dim=1) | ||
258 | + p4[112], p4[113], p4[114], p4[115], p4[116], p4[117], p4[118], p4[119], p4[120], p4[121], p4[122], p4[123], p4[124], p4[125], p4[126], p4[127] = torch.chunk(t3[7], 16, dim=1) | ||
259 | + p4[128], p4[129], p4[130], p4[131], p4[132], p4[133], p4[134], p4[135], p4[136], p4[137], p4[138], p4[139], p4[140], p4[141], p4[142], p4[143] = torch.chunk(t3[8], 16, dim=1) | ||
260 | + p4[144], p4[145], p4[146], p4[147], p4[148], p4[149], p4[150], p4[151], p4[152], p4[153], p4[154], p4[155], p4[156], p4[157], p4[158], p4[159] = torch.chunk(t3[9], 16, dim=1) | ||
261 | + p4[160], p4[161], p4[162], p4[163], p4[164], p4[165], p4[166], p4[167], p4[168], p4[169], p4[170], p4[171], p4[172], p4[173], p4[174], p4[175] = torch.chunk(t3[10], 16, dim=1) | ||
262 | + p4[176], p4[177], p4[178], p4[179], p4[180], p4[181], p4[182], p4[183], p4[184], p4[185], p4[186], p4[187], p4[188], p4[189], p4[190], p4[191] = torch.chunk(t3[11], 16, dim=1) | ||
263 | + p4[192], p4[193], p4[194], p4[195], p4[196], p4[197], p4[198], p4[199], p4[200], p4[201], p4[202], p4[203], p4[204], p4[205], p4[206], p4[207] = torch.chunk(t3[12], 16, dim=1) | ||
264 | + p4[208], p4[209], p4[210], p4[211], p4[212], p4[213], p4[214], p4[215], p4[216], p4[217], p4[218], p4[219], p4[220], p4[221], p4[222], p4[223] = torch.chunk(t3[13], 16, dim=1) | ||
265 | + p4[224], p4[225], p4[226], p4[227], p4[228], p4[229], p4[230], p4[231], p4[232], p4[233], p4[234], p4[235], p4[236], p4[237], p4[238], p4[239] = torch.chunk(t3[14], 16, dim=1) | ||
266 | + p4[240], p4[241], p4[242], p4[243], p4[244], p4[245], p4[246], p4[247], p4[248], p4[249], p4[250], p4[251], p4[252], p4[253], p4[254], p4[255] = torch.chunk(t3[15], 16, dim=1) | ||
267 | + | ||
268 | + p2_tensor = pad_sequence(p2, batch_first=True) | ||
269 | + p3_tensor = pad_sequence(p3, batch_first=True) | ||
270 | + p4_tensor = pad_sequence(p4, batch_first=True) | ||
271 | + | ||
272 | + cc = p2_tensor.unsqueeze(0) | ||
273 | + cc2 = p3_tensor.unsqueeze(0) | ||
274 | + cc3 = p4_tensor.unsqueeze(0) | ||
275 | + | ||
276 | + p2_cuda = cc.to(torch.device("cuda")) | ||
277 | + p3_cuda = cc2.to(torch.device("cuda")) | ||
278 | + p4_cuda = cc3.to(torch.device("cuda")) | ||
279 | + | ||
280 | + aug = T.ResizeShortestEdge( | ||
281 | + # [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST | ||
282 | + # [480, 480], cfg.INPUT.MAX_SIZE_TEST | ||
283 | + [768, 768], cfg.INPUT.MAX_SIZE_TEST | ||
284 | + ) | ||
285 | + image = cv2.imread(opt.valid_data_path + '000000'+ image_file_number +'.jpg') | ||
286 | + height, width = image.shape[:2] | ||
287 | + image = aug.get_transform(image).apply_image(image) | ||
288 | + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) | ||
289 | + inputs = [{"image": image, "height": height, "width": width}] | ||
290 | + | ||
291 | + with torch.no_grad(): | ||
292 | + images = model.preprocess_image(inputs) # don't forget to preprocess | ||
293 | + features = model.backbone(images.tensor) # set of cnn features | ||
294 | + features['p2'] = p2_cuda | ||
295 | + features['p3'] = p3_cuda | ||
296 | + features['p4'] = p4_cuda | ||
297 | + | ||
298 | + proposals, _ = model.proposal_generator(images, features, None) # RPN | ||
299 | + | ||
300 | + features_ = [features[f] for f in model.roi_heads.box_in_features] | ||
301 | + box_features = model.roi_heads.box_pooler(features_, [x.proposal_boxes for x in proposals]) | ||
302 | + box_features = model.roi_heads.box_head(box_features) # features of all 1k candidates | ||
303 | + predictions = model.roi_heads.box_predictor(box_features) | ||
304 | + pred_instances, pred_inds = model.roi_heads.box_predictor.inference(predictions, proposals) | ||
305 | + pred_instances = model.roi_heads.forward_with_given_boxes(features, pred_instances) | ||
306 | + | ||
307 | + # output boxes, masks, scores, etc | ||
308 | + pred_instances = model._postprocess(pred_instances, inputs, images.image_sizes) # scale box to orig size | ||
309 | + # features of the proposed boxes | ||
310 | + feats = box_features[pred_inds] | ||
311 | + | ||
312 | + pred_category = pred_instances[0]["instances"].pred_classes.to("cpu") | ||
313 | + pred_segmentation = pred_instances[0]["instances"].pred_masks.to("cpu") | ||
314 | + pred_score = pred_instances[0]["instances"].scores.to("cpu") | ||
315 | + | ||
316 | + xxx = pred_category | ||
317 | + xxx = xxx.numpy() | ||
318 | + | ||
319 | + xxx = xxx + 1 | ||
320 | + | ||
321 | + for idx in range(len(xxx)): | ||
322 | + if -1 < int(xxx[idx]) < 12: | ||
323 | + xxx[idx] = xxx[idx] | ||
324 | + elif 11 < int(xxx[idx]) < 25: | ||
325 | + xxx[idx] = xxx[idx] + 1 | ||
326 | + elif 24 < int(xxx[idx]) < 27: | ||
327 | + xxx[idx] = xxx[idx] + 2 | ||
328 | + elif 26 < int(xxx[idx]) < 41: | ||
329 | + xxx[idx] = xxx[idx] + 4 | ||
330 | + elif 40 < int(xxx[idx]) < 61: | ||
331 | + xxx[idx] = xxx[idx] + 5 | ||
332 | + elif 60 < int(xxx[idx]) < 62: | ||
333 | + xxx[idx] = 67 | ||
334 | + elif 61 < int(xxx[idx]) < 63: | ||
335 | + xxx[idx] = 70 | ||
336 | + elif 62 < int(xxx[idx]) < 74: | ||
337 | + xxx[idx] = xxx[idx] + 9 | ||
338 | + else: | ||
339 | + xxx[idx] = xxx[idx] + 10 | ||
340 | + | ||
341 | + imgID = int(image_file_number) | ||
342 | + if image_idx == 0: | ||
343 | + anns = [] | ||
344 | + else: | ||
345 | + anns = anns | ||
346 | + | ||
347 | + for idx in range(len(pred_category.numpy())): | ||
348 | + | ||
349 | + anndata = {} | ||
350 | + anndata['image_id'] = imgID | ||
351 | + anndata['category_id'] = int(xxx[idx]) | ||
352 | + | ||
353 | + anndata['segmentation'] = encode(np.asfortranarray(pred_segmentation[idx].numpy())) | ||
354 | + anndata['score'] = float(pred_score[idx].numpy()) | ||
355 | + anns.append(anndata) | ||
356 | + | ||
357 | + image_idx = image_idx + 1 | ||
358 | + # print("###image###:{}".format(image_idx)) | ||
359 | + | ||
360 | +annType = ['segm','bbox','keypoints'] | ||
361 | +annType = annType[0] #specify type here | ||
362 | +prefix = 'instances' | ||
363 | +print('Running demo for *%s* results.'%(annType)) | ||
364 | +# imgIds = [560474] | ||
365 | + | ||
366 | +annFile = './instances_val2017_dataset100.json' | ||
367 | +cocoGt=COCO(annFile) | ||
368 | + | ||
369 | +#initialize COCO detections api | ||
370 | +resFile = anns | ||
371 | +cocoDt=cocoGt.loadRes(resFile) | ||
372 | + | ||
373 | +# running evaluation | ||
374 | +cocoEval = COCOeval(cocoGt,cocoDt,annType) | ||
375 | +# cocoEval.params.imgIds = imgIds | ||
376 | +# the very top line | ||
377 | +cocoEval.evaluate() | ||
378 | +cocoEval.accumulate() | ||
379 | +cocoEval.summarize() | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
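Both calculate_mAP scripts reconstruct the p2/p3/p4 features as recon = ((q - 2^(bitDepth-1)) / 2^bitDepth) * maxRange, where maxRange = ceil(2 * max|feature|) is recomputed from the uncompressed backbone output. For reference, a sketch of the matching quantize/dequantize pair; dequantize_feature mirrors recon_p2/recon_p3/recon_p4 above, while the forward quantize_feature is only inferred from that formula and is an assumption about how the feature PNGs were produced, not code from this PR:

import numpy as np

def quantize_feature(x, max_range, bit_depth=8):
    # inverse of the reconstruction used above: map roughly [-max_range/2, max_range/2) to [0, 2**bit_depth)
    q = np.round(x / max_range * (2 ** bit_depth) + 2 ** (bit_depth - 1))
    return np.clip(q, 0, 2 ** bit_depth - 1).astype(np.uint8)

def dequantize_feature(q, max_range, bit_depth=8):
    # matches recon_p2/recon_p3/recon_p4 in the scripts
    return ((q.astype(np.float32) - 2 ** (bit_depth - 1)) / (2 ** bit_depth)) * max_range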
code/vdsr/calculate_mAP_bicubic.py
0 → 100644
1 | +# # # [1] | ||
2 | +# # # install dependencies: | ||
3 | +# !pip install pyyaml==5.1 | ||
4 | +# import torch, torchvision | ||
5 | +# print(torch.__version__, torch.cuda.is_available()) | ||
6 | +# !gcc --version | ||
7 | +# # opencv is pre-installed on colab | ||
8 | + | ||
9 | +# # # [2] | ||
10 | +# # # install detectron2: (Colab has CUDA 10.1 + torch 1.8) | ||
11 | +# # # See https://detectron2.readthedocs.io/tutorials/install.html for instructions | ||
12 | +# import torch | ||
13 | +# assert torch.__version__.startswith("1.8") # need to manually install torch 1.8 if Colab changes its default version | ||
14 | +# !pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/torch1.8/index.html | ||
15 | +# # exit(0) # After installation, you need to "restart runtime" in Colab. This line can also restart runtime | ||
16 | + | ||
17 | +# # # [3] | ||
18 | +# # # Some basic setup: | ||
19 | +# # # Setup detectron2 logger | ||
20 | +# import detectron2 | ||
21 | +# from detectron2.utils.logger import setup_logger | ||
22 | +# setup_logger() | ||
23 | + | ||
24 | +# import some common libraries | ||
25 | +import torch | ||
26 | +import numpy as np | ||
27 | +import os, json, cv2, random, math | ||
28 | +from PIL import Image | ||
29 | +from torch.nn.utils.rnn import pad_sequence | ||
30 | + | ||
31 | +# import some common detectron2 utilities | ||
32 | +from detectron2 import model_zoo | ||
33 | +from detectron2.engine import DefaultPredictor | ||
34 | +from detectron2.config import get_cfg | ||
35 | +from detectron2.utils.visualizer import Visualizer | ||
36 | +from detectron2.data import MetadataCatalog, DatasetCatalog | ||
37 | +from detectron2.modeling import build_model, build_backbone | ||
38 | +from detectron2.checkpoint import DetectionCheckpointer | ||
39 | +from detectron2.utils.visualizer import Visualizer | ||
40 | +import detectron2.data.transforms as T | ||
41 | + | ||
42 | +from pycocotools.coco import COCO | ||
43 | +from pycocotools.cocoeval import COCOeval | ||
44 | +from pycocotools.mask import encode | ||
45 | +import argparse | ||
46 | + | ||
47 | +parser = argparse.ArgumentParser(description="PyTorch CARN") | ||
48 | +parser.add_argument("--valid_data_path", type=str) | ||
49 | +parser.add_argument("--rescale_factor", type=int, help="rescale factor for using in training") | ||
50 | +parser.add_argument("--model_name", type=str,choices= ["VDSR", "CARN", "SRRN","FRGAN"], default='CARN', help="Feature type for usingin training") | ||
51 | +parser.add_argument("--loss_type", type=str, choices= ["MSE", "L1", "SmoothL1","vgg_loss","ssim_loss","adv_loss","lpips"], default='MSE', help="loss type in training") | ||
52 | +parser.add_argument('--batch_size', type=int, default=256) | ||
53 | +opt = parser.parse_args() | ||
54 | +print(opt) | ||
55 | + | ||
56 | + | ||
57 | +def myRound(x): # round half away from zero (symmetric about 0 for positive and negative values) | ||
58 | + abs_x = abs(x) | ||
59 | + val = np.int16(abs_x + 0.5) | ||
60 | + val2 = np.choose( | ||
61 | + x < 0, | ||
62 | + [ | ||
63 | + val, val*(-1) | ||
64 | + ] | ||
65 | + ) | ||
66 | + return val2 | ||
67 | + | ||
68 | +def myClip(x, maxV): | ||
69 | + val = np.choose( | ||
70 | + x > maxV, | ||
71 | + [ | ||
72 | + x, maxV | ||
73 | + ] | ||
74 | + ) | ||
75 | + return val | ||
76 | + | ||
77 | +image_idx = 0 | ||
78 | +cfg = get_cfg() | ||
79 | +# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library | ||
80 | +cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")) | ||
81 | +cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model | ||
82 | +# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well | ||
83 | +cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") | ||
84 | + | ||
85 | +model = build_model(cfg) | ||
86 | +DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) | ||
87 | +model.eval() | ||
88 | + | ||
89 | +image_idx = 0 | ||
90 | +anns = 0 | ||
91 | + | ||
92 | +# Original_8bit | ||
93 | + | ||
94 | +image_files = ['001000', '002153', '008021', '009769', '009891', '015335', '017627', '018150', '018837', '022589'] | ||
95 | +image_files.extend(['022935', '023230', '024610', '025560', '025593', '027620', '155341', '161397', '165336', '166287']) | ||
96 | +image_files.extend(['166642', '169996', '172330', '172648', '176606', '176701', '179765', '180101', '186296', '250758']) | ||
97 | +image_files.extend(['259382', '267191', '287545', '287649', '289741', '293245', '308328', '309452', '335529', '337987']) | ||
98 | +image_files.extend(['338625', '344029', '350122', '389933', '393226', '395343', '395633', '401862', '402473', '402992']) | ||
99 | +image_files.extend(['404568', '406997', '408112', '410650', '414385', '414795', '415194', '415536', '416104', '416758']) | ||
100 | +image_files.extend(['427055', '428562', '430073', '433204', '447200', '447313', '448448', '452321', '453001', '458755']) | ||
101 | +image_files.extend(['462904', '463522', '464089', '468965', '469192', '469246', '471450', '474078', '474881', '475678']) | ||
102 | +image_files.extend(['475779', '537802', '542625', '543043', '543300', '543528', '547502', '550691', '553669', '567740']) | ||
103 | +image_files.extend(['570688', '570834', '571943', '573391', '574315', '575372', '575970', '578093', '579158', '581100']) | ||
104 | + | ||
105 | + | ||
106 | +for iter in range(0, 100): | ||
107 | + | ||
108 | + image_file_number = image_files[image_idx] | ||
109 | + aug = T.ResizeShortestEdge( | ||
110 | + # [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST | ||
111 | + # [480, 480], cfg.INPUT.MAX_SIZE_TEST | ||
112 | + [768, 768], cfg.INPUT.MAX_SIZE_TEST | ||
113 | + ) | ||
114 | + image_prefix = "COCO_val2017_" | ||
115 | + image = cv2.imread(opt.valid_data_path + '000000'+ image_file_number +'.jpg') | ||
116 | + # image = cv2.imread('./dataset/validset_100/000000'+ image_file_number +'.jpg') | ||
117 | + height, width = image.shape[:2] | ||
118 | + image = aug.get_transform(image).apply_image(image) | ||
119 | + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) | ||
120 | + inputs = [{"image": image, "height": height, "width": width}] | ||
121 | + with torch.no_grad(): | ||
122 | + images = model.preprocess_image(inputs) # don't forget to preprocess | ||
123 | + features = model.backbone(images.tensor) # set of cnn features | ||
124 | + | ||
125 | + | ||
126 | + p2_feature_original = features['p2'].to("cpu") | ||
127 | + p3_feature_original = features['p3'].to("cpu") | ||
128 | + p4_feature_original = features['p4'].to("cpu") | ||
129 | + | ||
130 | + bitDepth = 8 | ||
131 | + maxRange = [0, 0, 0, 0, 0] | ||
132 | + | ||
133 | + def maxVal(x): | ||
134 | + return pow(2, x) | ||
135 | + def offsetVal(x): | ||
136 | + return pow(2, x-1) | ||
137 | + | ||
138 | + def maxRange_layer(x): | ||
139 | + absolute_arr = torch.abs(x) * 2 | ||
140 | + max_arr = torch.max(absolute_arr) | ||
141 | + return torch.ceil(max_arr) | ||
142 | + | ||
143 | + | ||
144 | + act2 = p2_feature_original.squeeze() | ||
145 | + maxRange[0] = maxRange_layer(act2) | ||
146 | + | ||
147 | + act3 = p3_feature_original.squeeze() | ||
148 | + maxRange[1] = maxRange_layer(act3) | ||
149 | + | ||
150 | + act4 = p4_feature_original.squeeze() | ||
151 | + maxRange[2] = maxRange_layer(act4) | ||
152 | + | ||
153 | + globals()['maxRange_{}'.format(image_file_number)] = maxRange | ||
154 | + | ||
155 | + p2_feature_img = Image.open('/content/drive/MyDrive/result/bicubic_x{}/LR/p2/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p2' +'.png') | ||
156 | + p3_feature_img = Image.open('/content/drive/MyDrive/result/bicubic_x{}/LR/p3/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p3' +'.png') | ||
157 | + p4_feature_img = Image.open('/content/drive/MyDrive/result/bicubic_x{}/LR/p4/'.format(opt.rescale_factor) + image_prefix + '000000' + image_file_number + '_p4' +'.png') | ||
158 | + # p2_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p2.png' | ||
159 | + # p2_feature_img = Image.open('./result/{}/inference/{}_p2x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) | ||
160 | + | ||
161 | + # # y_p2, cb, cr = p2_feature_img.split() | ||
162 | + p2_feature_arr = np.array(p2_feature_img) | ||
163 | + p2_feature_arr_round = myRound(p2_feature_arr) | ||
164 | + | ||
165 | + # p3_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p3.png') | ||
166 | + | ||
167 | + # p3_feature_img = Image.open('./result/{}/inference/{}_p3x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) | ||
168 | + | ||
169 | + # # y_p3, cb2, cr2 = p3_feature_img.split() | ||
170 | + p3_feature_arr = np.array(p3_feature_img) | ||
171 | + p3_feature_arr_round = myRound(p3_feature_arr) | ||
172 | + | ||
173 | + # p4_feature_img = Image.open('./original/qp32/COCO_val2014_000000'+ image_file_number +'_p4.png') | ||
174 | + # p4_feature_img = Image.open('./result/{}/inference/{}_p4x{}/SR_{}.png'.format(opt.loss_type,opt.model_name,opt.rescale_factor,str(iter))) | ||
175 | + | ||
176 | + # y_p4, cb3, cr3 = p4_feature_img.split() | ||
177 | + p4_feature_arr = np.array(p4_feature_img) | ||
178 | + p4_feature_arr_round = myRound(p4_feature_arr) | ||
179 | + | ||
180 | + | ||
181 | + # Reconstruction: dequantize the 8-bit feature PNGs back to feature-map values | ||
182 | + recon_p2 = (((p2_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[0].numpy()) | ||
183 | + recon_p3 = (((p3_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[1].numpy()) | ||
184 | + recon_p4 = (((p4_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[2].numpy()) | ||
185 | + | ||
186 | + tensor_value = recon_p2 | ||
187 | + tensor_value2 = recon_p3 | ||
188 | + tensor_value3 = recon_p4 | ||
189 | + | ||
190 | + # # end of MSB code | ||
191 | + | ||
192 | + # LSB and original code | ||
193 | + # reconstruction | ||
194 | + # recon_p2 = (((p2_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[0].numpy()) | ||
195 | + # recon_p3 = (((p3_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[1].numpy()) | ||
196 | + # recon_p4 = (((p4_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[2].numpy()) | ||
197 | + # recon_p5 = (((p5_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[3].numpy()) | ||
198 | + # recon_p6 = (((p6_feature_arr_round - offsetVal(bitDepth)) / maxVal(bitDepth)) * maxRange[4].numpy()) | ||
199 | + | ||
200 | + tensor_value = torch.as_tensor(recon_p2.astype("float32")) | ||
201 | + tensor_value2 = torch.as_tensor(recon_p3.astype("float32")) | ||
202 | + tensor_value3 = torch.as_tensor(recon_p4.astype("float32")) | ||
203 | + # end of LSB and original code | ||
204 | + | ||
205 | + t = [None] * 16 | ||
206 | + t[0], t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8], t[9], t[10], t[11], t[12], t[13], t[14], t[15] = torch.chunk(tensor_value, 16, dim=0) | ||
207 | + p2 = [None] * 256 | ||
208 | + | ||
209 | + t2 = [None] * 16 | ||
210 | + t2[0], t2[1], t2[2], t2[3], t2[4], t2[5], t2[6], t2[7], t2[8], t2[9], t2[10], t2[11], t2[12], t2[13], t2[14], t2[15] = torch.chunk(tensor_value2, 16, dim=0) | ||
211 | + p3 = [None] * 256 | ||
212 | + | ||
213 | + t3 = [None] * 16 | ||
214 | + t3[0], t3[1], t3[2], t3[3], t3[4], t3[5], t3[6], t3[7], t3[8], t3[9], t3[10], t3[11], t3[12], t3[13], t3[14], t3[15] = torch.chunk(tensor_value3, 16, dim=0) | ||
215 | + p4 = [None] * 256 | ||
216 | + | ||
217 | + p2[0], p2[1], p2[2], p2[3], p2[4], p2[5], p2[6], p2[7], p2[8], p2[9], p2[10], p2[11], p2[12], p2[13], p2[14], p2[15] = torch.chunk(t[0], 16, dim=1) | ||
218 | + p2[16], p2[17], p2[18], p2[19], p2[20], p2[21], p2[22], p2[23], p2[24], p2[25], p2[26], p2[27], p2[28], p2[29], p2[30], p2[31] = torch.chunk(t[1], 16, dim=1) | ||
219 | + p2[32], p2[33], p2[34], p2[35], p2[36], p2[37], p2[38], p2[39], p2[40], p2[41], p2[42], p2[43], p2[44], p2[45], p2[46], p2[47] = torch.chunk(t[2], 16, dim=1) | ||
220 | + p2[48], p2[49], p2[50], p2[51], p2[52], p2[53], p2[54], p2[55], p2[56], p2[57], p2[58], p2[59], p2[60], p2[61], p2[62], p2[63] = torch.chunk(t[3], 16, dim=1) | ||
221 | + p2[64], p2[65], p2[66], p2[67], p2[68], p2[69], p2[70], p2[71], p2[72], p2[73], p2[74], p2[75], p2[76], p2[77], p2[78], p2[79] = torch.chunk(t[4], 16, dim=1) | ||
222 | + p2[80], p2[81], p2[82], p2[83], p2[84], p2[85], p2[86], p2[87], p2[88], p2[89], p2[90], p2[91], p2[92], p2[93], p2[94], p2[95] = torch.chunk(t[5], 16, dim=1) | ||
223 | + p2[96], p2[97], p2[98], p2[99], p2[100], p2[101], p2[102], p2[103], p2[104], p2[105], p2[106], p2[107], p2[108], p2[109], p2[110], p2[111] = torch.chunk(t[6], 16, dim=1) | ||
224 | + p2[112], p2[113], p2[114], p2[115], p2[116], p2[117], p2[118], p2[119], p2[120], p2[121], p2[122], p2[123], p2[124], p2[125], p2[126], p2[127] = torch.chunk(t[7], 16, dim=1) | ||
225 | + p2[128], p2[129], p2[130], p2[131], p2[132], p2[133], p2[134], p2[135], p2[136], p2[137], p2[138], p2[139], p2[140], p2[141], p2[142], p2[143] = torch.chunk(t[8], 16, dim=1) | ||
226 | + p2[144], p2[145], p2[146], p2[147], p2[148], p2[149], p2[150], p2[151], p2[152], p2[153], p2[154], p2[155], p2[156], p2[157], p2[158], p2[159] = torch.chunk(t[9], 16, dim=1) | ||
227 | + p2[160], p2[161], p2[162], p2[163], p2[164], p2[165], p2[166], p2[167], p2[168], p2[169], p2[170], p2[171], p2[172], p2[173], p2[174], p2[175] = torch.chunk(t[10], 16, dim=1) | ||
228 | + p2[176], p2[177], p2[178], p2[179], p2[180], p2[181], p2[182], p2[183], p2[184], p2[185], p2[186], p2[187], p2[188], p2[189], p2[190], p2[191] = torch.chunk(t[11], 16, dim=1) | ||
229 | + p2[192], p2[193], p2[194], p2[195], p2[196], p2[197], p2[198], p2[199], p2[200], p2[201], p2[202], p2[203], p2[204], p2[205], p2[206], p2[207] = torch.chunk(t[12], 16, dim=1) | ||
230 | + p2[208], p2[209], p2[210], p2[211], p2[212], p2[213], p2[214], p2[215], p2[216], p2[217], p2[218], p2[219], p2[220], p2[221], p2[222], p2[223] = torch.chunk(t[13], 16, dim=1) | ||
231 | + p2[224], p2[225], p2[226], p2[227], p2[228], p2[229], p2[230], p2[231], p2[232], p2[233], p2[234], p2[235], p2[236], p2[237], p2[238], p2[239] = torch.chunk(t[14], 16, dim=1) | ||
232 | + p2[240], p2[241], p2[242], p2[243], p2[244], p2[245], p2[246], p2[247], p2[248], p2[249], p2[250], p2[251], p2[252], p2[253], p2[254], p2[255] = torch.chunk(t[15], 16, dim=1) | ||
233 | + | ||
234 | + p3[0], p3[1], p3[2], p3[3], p3[4], p3[5], p3[6], p3[7], p3[8], p3[9], p3[10], p3[11], p3[12], p3[13], p3[14], p3[15] = torch.chunk(t2[0], 16, dim=1) | ||
235 | + p3[16], p3[17], p3[18], p3[19], p3[20], p3[21], p3[22], p3[23], p3[24], p3[25], p3[26], p3[27], p3[28], p3[29], p3[30], p3[31] = torch.chunk(t2[1], 16, dim=1) | ||
236 | + p3[32], p3[33], p3[34], p3[35], p3[36], p3[37], p3[38], p3[39], p3[40], p3[41], p3[42], p3[43], p3[44], p3[45], p3[46], p3[47] = torch.chunk(t2[2], 16, dim=1) | ||
237 | + p3[48], p3[49], p3[50], p3[51], p3[52], p3[53], p3[54], p3[55], p3[56], p3[57], p3[58], p3[59], p3[60], p3[61], p3[62], p3[63] = torch.chunk(t2[3], 16, dim=1) | ||
238 | + p3[64], p3[65], p3[66], p3[67], p3[68], p3[69], p3[70], p3[71], p3[72], p3[73], p3[74], p3[75], p3[76], p3[77], p3[78], p3[79] = torch.chunk(t2[4], 16, dim=1) | ||
239 | + p3[80], p3[81], p3[82], p3[83], p3[84], p3[85], p3[86], p3[87], p3[88], p3[89], p3[90], p3[91], p3[92], p3[93], p3[94], p3[95] = torch.chunk(t2[5], 16, dim=1) | ||
240 | + p3[96], p3[97], p3[98], p3[99], p3[100], p3[101], p3[102], p3[103], p3[104], p3[105], p3[106], p3[107], p3[108], p3[109], p3[110], p3[111] = torch.chunk(t2[6], 16, dim=1) | ||
241 | + p3[112], p3[113], p3[114], p3[115], p3[116], p3[117], p3[118], p3[119], p3[120], p3[121], p3[122], p3[123], p3[124], p3[125], p3[126], p3[127] = torch.chunk(t2[7], 16, dim=1) | ||
242 | + p3[128], p3[129], p3[130], p3[131], p3[132], p3[133], p3[134], p3[135], p3[136], p3[137], p3[138], p3[139], p3[140], p3[141], p3[142], p3[143] = torch.chunk(t2[8], 16, dim=1) | ||
243 | + p3[144], p3[145], p3[146], p3[147], p3[148], p3[149], p3[150], p3[151], p3[152], p3[153], p3[154], p3[155], p3[156], p3[157], p3[158], p3[159] = torch.chunk(t2[9], 16, dim=1) | ||
244 | + p3[160], p3[161], p3[162], p3[163], p3[164], p3[165], p3[166], p3[167], p3[168], p3[169], p3[170], p3[171], p3[172], p3[173], p3[174], p3[175] = torch.chunk(t2[10], 16, dim=1) | ||
245 | + p3[176], p3[177], p3[178], p3[179], p3[180], p3[181], p3[182], p3[183], p3[184], p3[185], p3[186], p3[187], p3[188], p3[189], p3[190], p3[191] = torch.chunk(t2[11], 16, dim=1) | ||
246 | + p3[192], p3[193], p3[194], p3[195], p3[196], p3[197], p3[198], p3[199], p3[200], p3[201], p3[202], p3[203], p3[204], p3[205], p3[206], p3[207] = torch.chunk(t2[12], 16, dim=1) | ||
247 | + p3[208], p3[209], p3[210], p3[211], p3[212], p3[213], p3[214], p3[215], p3[216], p3[217], p3[218], p3[219], p3[220], p3[221], p3[222], p3[223] = torch.chunk(t2[13], 16, dim=1) | ||
248 | + p3[224], p3[225], p3[226], p3[227], p3[228], p3[229], p3[230], p3[231], p3[232], p3[233], p3[234], p3[235], p3[236], p3[237], p3[238], p3[239] = torch.chunk(t2[14], 16, dim=1) | ||
249 | + p3[240], p3[241], p3[242], p3[243], p3[244], p3[245], p3[246], p3[247], p3[248], p3[249], p3[250], p3[251], p3[252], p3[253], p3[254], p3[255] = torch.chunk(t2[15], 16, dim=1) | ||
250 | + | ||
251 | + p4[0], p4[1], p4[2], p4[3], p4[4], p4[5], p4[6], p4[7], p4[8], p4[9], p4[10], p4[11], p4[12], p4[13], p4[14], p4[15] = torch.chunk(t3[0], 16, dim=1) | ||
252 | + p4[16], p4[17], p4[18], p4[19], p4[20], p4[21], p4[22], p4[23], p4[24], p4[25], p4[26], p4[27], p4[28], p4[29], p4[30], p4[31] = torch.chunk(t3[1], 16, dim=1) | ||
253 | + p4[32], p4[33], p4[34], p4[35], p4[36], p4[37], p4[38], p4[39], p4[40], p4[41], p4[42], p4[43], p4[44], p4[45], p4[46], p4[47] = torch.chunk(t3[2], 16, dim=1) | ||
254 | + p4[48], p4[49], p4[50], p4[51], p4[52], p4[53], p4[54], p4[55], p4[56], p4[57], p4[58], p4[59], p4[60], p4[61], p4[62], p4[63] = torch.chunk(t3[3], 16, dim=1) | ||
255 | + p4[64], p4[65], p4[66], p4[67], p4[68], p4[69], p4[70], p4[71], p4[72], p4[73], p4[74], p4[75], p4[76], p4[77], p4[78], p4[79] = torch.chunk(t3[4], 16, dim=1) | ||
256 | + p4[80], p4[81], p4[82], p4[83], p4[84], p4[85], p4[86], p4[87], p4[88], p4[89], p4[90], p4[91], p4[92], p4[93], p4[94], p4[95] = torch.chunk(t3[5], 16, dim=1) | ||
257 | + p4[96], p4[97], p4[98], p4[99], p4[100], p4[101], p4[102], p4[103], p4[104], p4[105], p4[106], p4[107], p4[108], p4[109], p4[110], p4[111] = torch.chunk(t3[6], 16, dim=1) | ||
258 | + p4[112], p4[113], p4[114], p4[115], p4[116], p4[117], p4[118], p4[119], p4[120], p4[121], p4[122], p4[123], p4[124], p4[125], p4[126], p4[127] = torch.chunk(t3[7], 16, dim=1) | ||
259 | + p4[128], p4[129], p4[130], p4[131], p4[132], p4[133], p4[134], p4[135], p4[136], p4[137], p4[138], p4[139], p4[140], p4[141], p4[142], p4[143] = torch.chunk(t3[8], 16, dim=1) | ||
260 | + p4[144], p4[145], p4[146], p4[147], p4[148], p4[149], p4[150], p4[151], p4[152], p4[153], p4[154], p4[155], p4[156], p4[157], p4[158], p4[159] = torch.chunk(t3[9], 16, dim=1) | ||
261 | + p4[160], p4[161], p4[162], p4[163], p4[164], p4[165], p4[166], p4[167], p4[168], p4[169], p4[170], p4[171], p4[172], p4[173], p4[174], p4[175] = torch.chunk(t3[10], 16, dim=1) | ||
262 | + p4[176], p4[177], p4[178], p4[179], p4[180], p4[181], p4[182], p4[183], p4[184], p4[185], p4[186], p4[187], p4[188], p4[189], p4[190], p4[191] = torch.chunk(t3[11], 16, dim=1) | ||
263 | + p4[192], p4[193], p4[194], p4[195], p4[196], p4[197], p4[198], p4[199], p4[200], p4[201], p4[202], p4[203], p4[204], p4[205], p4[206], p4[207] = torch.chunk(t3[12], 16, dim=1) | ||
264 | + p4[208], p4[209], p4[210], p4[211], p4[212], p4[213], p4[214], p4[215], p4[216], p4[217], p4[218], p4[219], p4[220], p4[221], p4[222], p4[223] = torch.chunk(t3[13], 16, dim=1) | ||
265 | + p4[224], p4[225], p4[226], p4[227], p4[228], p4[229], p4[230], p4[231], p4[232], p4[233], p4[234], p4[235], p4[236], p4[237], p4[238], p4[239] = torch.chunk(t3[14], 16, dim=1) | ||
266 | + p4[240], p4[241], p4[242], p4[243], p4[244], p4[245], p4[246], p4[247], p4[248], p4[249], p4[250], p4[251], p4[252], p4[253], p4[254], p4[255] = torch.chunk(t3[15], 16, dim=1) | ||
267 | + | ||
268 | + p2_tensor = pad_sequence(p2, batch_first=True) | ||
269 | + p3_tensor = pad_sequence(p3, batch_first=True) | ||
270 | + p4_tensor = pad_sequence(p4, batch_first=True) | ||
271 | + | ||
272 | + cc = p2_tensor.unsqueeze(0) | ||
273 | + cc2 = p3_tensor.unsqueeze(0) | ||
274 | + cc3 = p4_tensor.unsqueeze(0) | ||
275 | + | ||
276 | + p2_cuda = cc.to(torch.device("cuda")) | ||
277 | + p3_cuda = cc2.to(torch.device("cuda")) | ||
278 | + p4_cuda = cc3.to(torch.device("cuda")) | ||
279 | + | ||
280 | + aug = T.ResizeShortestEdge( | ||
281 | + # [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST | ||
282 | + # [480, 480], cfg.INPUT.MAX_SIZE_TEST | ||
283 | + [768, 768], cfg.INPUT.MAX_SIZE_TEST | ||
284 | + ) | ||
285 | + image = cv2.imread(opt.valid_data_path + '000000'+ image_file_number +'.jpg') | ||
286 | + height, width = image.shape[:2] | ||
287 | + image = aug.get_transform(image).apply_image(image) | ||
288 | + image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) | ||
289 | + inputs = [{"image": image, "height": height, "width": width}] | ||
290 | + | ||
291 | + with torch.no_grad(): | ||
292 | + images = model.preprocess_image(inputs) # don't forget to preprocess | ||
293 | + features = model.backbone(images.tensor) # set of cnn features | ||
294 | + features['p2'] = p2_cuda | ||
295 | + features['p3'] = p3_cuda | ||
296 | + features['p4'] = p4_cuda | ||
297 | + | ||
298 | + proposals, _ = model.proposal_generator(images, features, None) # RPN | ||
299 | + | ||
300 | + features_ = [features[f] for f in model.roi_heads.box_in_features] | ||
301 | + box_features = model.roi_heads.box_pooler(features_, [x.proposal_boxes for x in proposals]) | ||
302 | + box_features = model.roi_heads.box_head(box_features) # features of all 1k candidates | ||
303 | + predictions = model.roi_heads.box_predictor(box_features) | ||
304 | + pred_instances, pred_inds = model.roi_heads.box_predictor.inference(predictions, proposals) | ||
305 | + pred_instances = model.roi_heads.forward_with_given_boxes(features, pred_instances) | ||
306 | + | ||
307 | + # output boxes, masks, scores, etc | ||
308 | + pred_instances = model._postprocess(pred_instances, inputs, images.image_sizes) # scale box to orig size | ||
309 | + # features of the proposed boxes | ||
310 | + feats = box_features[pred_inds] | ||
311 | + | ||
312 | + pred_category = pred_instances[0]["instances"].pred_classes.to("cpu") | ||
313 | + pred_segmentation = pred_instances[0]["instances"].pred_masks.to("cpu") | ||
314 | + pred_score = pred_instances[0]["instances"].scores.to("cpu") | ||
315 | + | ||
316 | + xxx = pred_category | ||
317 | + xxx = xxx.numpy() | ||
318 | + | ||
319 | + xxx = xxx + 1 | ||
320 | + | ||
321 | + for idx in range(len(xxx)): | ||
322 | + if -1 < int(xxx[idx]) < 12: | ||
323 | + xxx[idx] = xxx[idx] | ||
324 | + elif 11 < int(xxx[idx]) < 25: | ||
325 | + xxx[idx] = xxx[idx] + 1 | ||
326 | + elif 24 < int(xxx[idx]) < 27: | ||
327 | + xxx[idx] = xxx[idx] + 2 | ||
328 | + elif 26 < int(xxx[idx]) < 41: | ||
329 | + xxx[idx] = xxx[idx] + 4 | ||
330 | + elif 40 < int(xxx[idx]) < 61: | ||
331 | + xxx[idx] = xxx[idx] + 5 | ||
332 | + elif 60 < int(xxx[idx]) < 62: | ||
333 | + xxx[idx] = 67 | ||
334 | + elif 61 < int(xxx[idx]) < 63: | ||
335 | + xxx[idx] = 70 | ||
336 | + elif 62 < int(xxx[idx]) < 74: | ||
337 | + xxx[idx] = xxx[idx] + 9 | ||
338 | + else: | ||
339 | + xxx[idx] = xxx[idx] + 10 | ||
340 | + | ||
341 | + imgID = int(image_file_number) | ||
342 | + if image_idx == 0: | ||
343 | + anns = [] | ||
344 | + else: | ||
345 | + anns = anns | ||
346 | + | ||
347 | + for idx in range(len(pred_category.numpy())): | ||
348 | + | ||
349 | + anndata = {} | ||
350 | + anndata['image_id'] = imgID | ||
351 | + anndata['category_id'] = int(xxx[idx]) | ||
352 | + | ||
353 | + anndata['segmentation'] = encode(np.asfortranarray(pred_segmentation[idx].numpy())) | ||
354 | + anndata['score'] = float(pred_score[idx].numpy()) | ||
355 | + anns.append(anndata) | ||
356 | + | ||
357 | + image_idx = image_idx + 1 | ||
358 | + # print("###image###:{}".format(image_idx)) | ||
359 | + | ||
360 | +annType = ['segm','bbox','keypoints'] | ||
361 | +annType = annType[0] #specify type here | ||
362 | +prefix = 'instances' | ||
363 | +print('Running demo for *%s* results.'%(annType)) | ||
364 | +# imgIds = [560474] | ||
365 | + | ||
366 | +annFile = './instances_val2017_dataset100.json' | ||
367 | +cocoGt=COCO(annFile) | ||
368 | + | ||
369 | +#initialize COCO detections api | ||
370 | +resFile = anns | ||
371 | +cocoDt=cocoGt.loadRes(resFile) | ||
372 | + | ||
373 | +# running evaluation | ||
374 | +cocoEval = COCOeval(cocoGt,cocoDt,annType) | ||
375 | +# cocoEval.params.imgIds = imgIds | ||
376 | +# the very top line | ||
377 | +cocoEval.evaluate() | ||
378 | +cocoEval.accumulate() | ||
379 | +cocoEval.summarize() | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
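The long if/elif chain in both scripts maps detectron2's contiguous class indices (0-79, shifted by +1 first) onto COCO's non-contiguous category ids (1-90). A more direct equivalent, sketched below, reads the mapping from detectron2's dataset metadata instead of hard-coding the offsets; it assumes the standard coco_2017_val metadata registered by detectron2 and is an alternative suggestion, not part of this PR:

from detectron2.data import MetadataCatalog

def to_coco_category_ids(pred_classes):
    # thing_dataset_id_to_contiguous_id maps COCO category id -> contiguous index; invert it
    meta = MetadataCatalog.get("coco_2017_val")
    contiguous_to_coco = {v: k for k, v in meta.thing_dataset_id_to_contiguous_id.items()}
    return [contiguous_to_coco[int(c)] for c in pred_classes]

Here pred_classes would be the per-instance class indices taken from pred_instances[0]["instances"].pred_classes.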
code/vdsr/checkpoint/model_epoch_6_p2.pth
0 → 100644
No preview for this file type
... | @@ -117,9 +117,7 @@ model = torch.load(opt.model, map_location=lambda storage, loc: storage)["model" | ... | @@ -117,9 +117,7 @@ model = torch.load(opt.model, map_location=lambda storage, loc: storage)["model" |
117 | 117 | ||
118 | scales = [opt.scaleFactor] | 118 | scales = [opt.scaleFactor] |
119 | 119 | ||
120 | -# image_list = glob.glob(opt.dataset+"/*.*") | ||
121 | if opt.singleImage == "Y" : | 120 | if opt.singleImage == "Y" : |
122 | - # image_list = crop_feature(opt.dataset, opt.featureType, opt.scaleFactor) | ||
123 | image_list = opt.dataset | 121 | image_list = opt.dataset |
124 | else: | 122 | else: |
125 | image_path = os.path.join(opt.dataset, opt.featureType) | 123 | image_path = os.path.join(opt.dataset, opt.featureType) |
... | @@ -150,7 +148,6 @@ for scale in scales: | ... | @@ -150,7 +148,6 @@ for scale in scales: |
150 | f_bi = f_bi.astype(float) | 148 | f_bi = f_bi.astype(float) |
151 | features_bicubic.append(f_bi) | 149 | features_bicubic.append(f_bi) |
152 | psnr_bicubic = PSNR(f_bi, f_gt, shave_border=scale) | 150 | psnr_bicubic = PSNR(f_bi, f_gt, shave_border=scale) |
153 | - # psnr_bicubic = PSNR_ver2(cv2.imread(f_gt), cv2.imread(f_bi)) | ||
154 | avg_psnr_bicubic += psnr_bicubic | 151 | avg_psnr_bicubic += psnr_bicubic |
155 | 152 | ||
156 | f_input = f_bi/255. | 153 | f_input = f_bi/255. |
... | @@ -177,7 +174,6 @@ for scale in scales: | ... | @@ -177,7 +174,6 @@ for scale in scales: |
177 | f_sr = f_sr[0,:,:] | 174 | f_sr = f_sr[0,:,:] |
178 | 175 | ||
179 | psnr_predicted = PSNR(f_sr, f_gt, shave_border=scale) | 176 | psnr_predicted = PSNR(f_sr, f_gt, shave_border=scale) |
180 | - # psnr_predicted = PSNR_ver2(cv2.imread(f_gt), cv2.imread(f_sr)) | ||
181 | avg_psnr_predicted += psnr_predicted | 177 | avg_psnr_predicted += psnr_predicted |
182 | features.append(f_sr) | 178 | features.append(f_sr) |
183 | 179 | ||
... | @@ -187,23 +183,3 @@ for scale in scales: | ... | @@ -187,23 +183,3 @@ for scale in scales: |
187 | print("Dataset=", opt.dataset) | 183 | print("Dataset=", opt.dataset) |
188 | print("Average PSNR_predicted=", avg_psnr_predicted/count) | 184 | print("Average PSNR_predicted=", avg_psnr_predicted/count) |
189 | print("Average PSNR_bicubic=", avg_psnr_bicubic/count) | 185 | print("Average PSNR_bicubic=", avg_psnr_bicubic/count) |
190 | - | ||
191 | - | ||
192 | -# Show graph | ||
193 | -# f_gt = Image.fromarray(f_gt) | ||
194 | -# f_b = Image.fromarray(f_bi) | ||
195 | -# f_sr = Image.fromarray(f_sr) | ||
196 | - | ||
197 | -# fig = plt.figure(figsize=(18, 16), dpi= 80) | ||
198 | -# ax = plt.subplot("131") | ||
199 | -# ax.imshow(f_gt) | ||
200 | -# ax.set_title("GT") | ||
201 | - | ||
202 | -# ax = plt.subplot("132") | ||
203 | -# ax.imshow(f_bi) | ||
204 | -# ax.set_title("Input(bicubic)") | ||
205 | - | ||
206 | -# ax = plt.subplot("133") | ||
207 | -# ax.imshow(f_sr) | ||
208 | -# ax.set_title("Output(vdsr)") | ||
209 | -# plt.show() | ... | ... |
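
`PSNR(f_sr, f_gt, shave_border=scale)` is defined in the repository's own utilities and is not shown in this diff. A minimal sketch of what such a helper typically computes for 8-bit feature maps, assuming a `shave_border`-pixel margin is cropped from both arrays before comparison (the function name and signature here are hypothetical stand-ins):

```python
import numpy as np

def psnr(pred, gt, shave_border=0, peak=255.0):
    """Peak signal-to-noise ratio between two 2-D arrays in [0, peak]."""
    pred = pred.astype(np.float64)
    gt = gt.astype(np.float64)
    if shave_border > 0:
        pred = pred[shave_border:-shave_border, shave_border:-shave_border]
        gt = gt[shave_border:-shave_border, shave_border:-shave_border]
    mse = np.mean((pred - gt) ** 2)
    if mse == 0:
        return float("inf")          # identical images
    return 20.0 * np.log10(peak / np.sqrt(mse))
```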
... | @@ -13,7 +13,7 @@ from datasets import get_training_data_loader | ... | @@ -13,7 +13,7 @@ from datasets import get_training_data_loader |
13 | # from feature_dataset import get_training_data_loader | 13 | # from feature_dataset import get_training_data_loader |
14 | # from make_dataset import make_dataset | 14 | # from make_dataset import make_dataset |
15 | import numpy as np | 15 | import numpy as np |
16 | -from dataFromH5 import Read_dataset_h5 | 16 | +# from dataFromH5 import Read_dataset_h5 (moved to not_used/) |
17 | import matplotlib.pyplot as plt | 17 | import matplotlib.pyplot as plt |
18 | import math | 18 | import math |
19 | 19 | ... | ... |
code/vdsr/not_used/dataFromH5.py
0 → 100644
1 | +import torch.utils.data as data | ||
2 | +import torch | ||
3 | +import h5py | ||
4 | + | ||
5 | +class Read_dataset_h5(data.Dataset): | ||
6 | + def __init__(self, file_path): | ||
7 | + super(Read_dataset_h5, self).__init__() | ||
8 | + hf = h5py.File(file_path, 'r') # open the HDF5 file read-only | ||
9 | + self.input = hf.get('input') | ||
10 | + self.label = hf.get('label') | ||
11 | + | ||
12 | + def __getitem__(self, index): | ||
13 | + return torch.from_numpy(self.input[index,:,:,:]).float(), torch.from_numpy(self.label[index,:,:,:]).float() | ||
14 | + | ||
15 | + def __len__(self): | ||
16 | + return self.input.shape[0] |
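
A minimal usage sketch for `Read_dataset_h5`; the HDF5 path is hypothetical and assumes a file containing `input` and `label` datasets of shape (N, C, H, W), as written by `make_dataset.py` below:

```python
from torch.utils.data import DataLoader

# Hypothetical path to an HDF5 file with 'input' and 'label' datasets.
train_set = Read_dataset_h5("data/train_p2.h5")

# num_workers=0 because the h5py handle is opened in __init__ and does not
# transfer cleanly to worker processes.
train_loader = DataLoader(train_set, batch_size=16, shuffle=True, num_workers=0)

for lr_batch, hr_batch in train_loader:
    # Both are float tensors of shape (B, C, H, W).
    print(lr_batch.shape, hr_batch.shape)
    break
```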
code/vdsr/not_used/feature_dataset.py
0 → 100644
1 | +''' | ||
2 | +Not currently in use. | ||
3 | +''' | ||
4 | + | ||
5 | +# from torch.utils.data import Dataset | ||
6 | +# from PIL import Image | ||
7 | +# import os | ||
8 | +# from glob import glob | ||
9 | +# from torchvision import transforms | ||
10 | +# from torch.utils.data.dataset import Dataset | ||
11 | +# import torch | ||
12 | +# import pdb | ||
13 | +# import math | ||
14 | +# import numpy as np | ||
15 | +# import cv2 | ||
16 | + | ||
17 | + | ||
18 | +# class FeatureDataset(Dataset): | ||
19 | +# def __init__(self, data_path, datatype, rescale_factor, valid): | ||
20 | +# self.data_path = data_path | ||
21 | +# self.datatype = datatype | ||
22 | +# self.rescale_factor = rescale_factor | ||
23 | +# if not os.path.exists(data_path): | ||
24 | +# raise Exception(f"[!] {self.data_path} not existed") | ||
25 | +# if (valid): | ||
26 | +# self.hr_path = os.path.join(self.data_path, 'valid') | ||
27 | +# self.hr_path = os.path.join(self.hr_path, self.datatype) | ||
28 | +# else: | ||
29 | +# self.hr_path = os.path.join(self.data_path, 'LR_2') | ||
30 | +# self.hr_path = os.path.join(self.hr_path, self.datatype) | ||
31 | +# print(self.hr_path) | ||
32 | +# self.names = os.listdir(self.hr_path) | ||
33 | +# self.hr_path = sorted(glob(os.path.join(self.hr_path, "*.*"))) | ||
34 | +# self.hr_imgs = [] | ||
35 | + | ||
36 | +# w, h = Image.open(self.hr_path[0]).size | ||
37 | +# self.width = int(w / 16) | ||
38 | +# self.height = int(h / 16) | ||
39 | +# self.lwidth = int(self.width / self.rescale_factor) # shrink by rescale_factor | ||
40 | +# self.lheight = int(self.height / self.rescale_factor) | ||
41 | +# print("lr: ({} {}), hr: ({} {})".format(self.lwidth, self.lheight, self.width, self.height)) | ||
42 | + | ||
43 | +# self.original_hr_imgs = [] # the 250 original images | ||
44 | +# print("crop features ...") | ||
45 | +# for hr in self.hr_path: # split each image into 256 feature tiles | ||
46 | +# hr_cropped_imgs = [] | ||
47 | +# hr_image = Image.open(hr) # .convert('RGB')\ | ||
48 | +# self.original_hr_imgs.append(np.array(hr_image).astype(float)) # keep the original | ||
49 | +# for i in range(16): | ||
50 | +# for j in range(16): | ||
51 | +# (left, upper, right, lower) = ( | ||
52 | +# i * self.width, j * self.height, (i + 1) * self.width, (j + 1) * self.height) | ||
53 | +# crop = hr_image.crop((left, upper, right, lower)) | ||
54 | +# hr_cropped_imgs.append(crop) | ||
55 | +# self.hr_imgs.append(hr_cropped_imgs) | ||
56 | + | ||
57 | +# self.final_results = [] # 250 entries | ||
58 | +# print("resize and concat features ...") | ||
59 | +# for i in range(0, len(self.hr_imgs)): | ||
60 | +# hr_img = self.hr_imgs[i] | ||
61 | +# interpolated_images = [] | ||
62 | +# for img in hr_img: | ||
63 | +# image = img.resize((self.lwidth, self.lheight), Image.BICUBIC) | ||
64 | +# image = image.resize((self.width, self.height), Image.BICUBIC) | ||
65 | +# interpolated_images.append(np.array(image).astype(float)) | ||
66 | +# self.final_results.append(concatFeatures(interpolated_images, self.names[i], self.datatype)) | ||
67 | +# print(self.original_hr_imgs) | ||
68 | +# print(self.final_results) | ||
69 | + | ||
70 | +# def __getitem__(self, idx): | ||
71 | +# ground_truth = self.original_hr_imgs[idx] | ||
72 | +# final_result = self.final_results[idx] # list | ||
73 | +# return transforms.ToTensor()(final_result), transforms.ToTensor()(ground_truth) # return the degraded hr_image and the untouched one, each as a Tensor | ||
74 | + | ||
75 | +# def __len__(self): | ||
76 | +# return len(self.hr_path) | ||
77 | + | ||
78 | + | ||
79 | +# def concatFeatures(features, image_name, feature_type): | ||
80 | +# features_0 = features[:16] | ||
81 | +# features_1 = features[16:32] | ||
82 | +# features_2 = features[32:48] | ||
83 | +# features_3 = features[48:64] | ||
84 | +# features_4 = features[64:80] | ||
85 | +# features_5 = features[80:96] | ||
86 | +# features_6 = features[96:112] | ||
87 | +# features_7 = features[112:128] | ||
88 | +# features_8 = features[128:144] | ||
89 | +# features_9 = features[144:160] | ||
90 | +# features_10 = features[160:176] | ||
91 | +# features_11 = features[176:192] | ||
92 | +# features_12 = features[192:208] | ||
93 | +# features_13 = features[208:224] | ||
94 | +# features_14 = features[224:240] | ||
95 | +# features_15 = features[240:256] | ||
96 | + | ||
97 | +# features_new = list() | ||
98 | +# features_new.extend([ | ||
99 | +# concat_vertical(features_0), | ||
100 | +# concat_vertical(features_1), | ||
101 | +# concat_vertical(features_2), | ||
102 | +# concat_vertical(features_3), | ||
103 | +# concat_vertical(features_4), | ||
104 | +# concat_vertical(features_5), | ||
105 | +# concat_vertical(features_6), | ||
106 | +# concat_vertical(features_7), | ||
107 | +# concat_vertical(features_8), | ||
108 | +# concat_vertical(features_9), | ||
109 | +# concat_vertical(features_10), | ||
110 | +# concat_vertical(features_11), | ||
111 | +# concat_vertical(features_12), | ||
112 | +# concat_vertical(features_13), | ||
113 | +# concat_vertical(features_14), | ||
114 | +# concat_vertical(features_15) | ||
115 | +# ]) | ||
116 | + | ||
117 | +# final_concat_feature = concat_horizontal(features_new) | ||
118 | + | ||
119 | +# save_path = "features/LR_2/" + feature_type + "/" + image_name | ||
120 | +# if not os.path.exists("features/"): | ||
121 | +# os.makedirs("features/") | ||
122 | +# if not os.path.exists("features/LR_2/"): | ||
123 | +# os.makedirs("features/LR_2/") | ||
124 | +# if not os.path.exists("features/LR_2/" + feature_type): | ||
125 | +# os.makedirs("features/LR_2/" + feature_type) | ||
126 | +# cv2.imwrite(save_path, final_concat_feature) | ||
127 | + | ||
128 | +# return np.array(final_concat_feature).astype(float) | ||
129 | + | ||
130 | +# def concat_horizontal(feature): | ||
131 | +# result = cv2.hconcat([feature[0], feature[1]]) | ||
132 | +# for i in range(2, len(feature)): | ||
133 | +# result = cv2.hconcat([result, feature[i]]) | ||
134 | +# return result | ||
135 | + | ||
136 | +# def concat_vertical(feature): | ||
137 | +# result = cv2.vconcat([feature[0], feature[1]]) | ||
138 | +# for i in range(2, len(feature)): | ||
139 | +# result = cv2.vconcat([result, feature[i]]) | ||
140 | +# return result | ||
141 | + | ||
142 | + | ||
143 | +# def get_data_loader_test_version(data_path, feature_type, rescale_factor, batch_size, num_workers): | ||
144 | +# full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False) | ||
145 | +# print("dataset size: {}".format(len(full_dataset))) | ||
146 | +# for f in full_dataset: | ||
147 | +# print(type(f)) | ||
148 | + | ||
149 | + | ||
150 | +# def get_data_loader(data_path, feature_type, rescale_factor, batch_size, num_workers): | ||
151 | +# full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False) | ||
152 | +# train_size = int(0.9 * len(full_dataset)) | ||
153 | +# test_size = len(full_dataset) - train_size | ||
154 | +# train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size]) | ||
155 | +# torch.manual_seed(3334) | ||
156 | +# train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, | ||
157 | +# num_workers=num_workers, pin_memory=False) | ||
158 | +# test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, | ||
159 | +# num_workers=num_workers, pin_memory=True) | ||
160 | + | ||
161 | +# return train_loader, test_loader | ||
162 | + | ||
163 | + | ||
164 | +# def get_training_data_loader(data_path, feature_type, rescale_factor, batch_size, num_workers): | ||
165 | +# full_dataset = FeatureDataset(data_path, feature_type, rescale_factor, False) | ||
166 | +# torch.manual_seed(3334) | ||
167 | +# train_loader = torch.utils.data.DataLoader(dataset=full_dataset, batch_size=batch_size, shuffle=True, | ||
168 | +# num_workers=num_workers, pin_memory=False) | ||
169 | +# return train_loader | ||
170 | + | ||
171 | + | ||
172 | +# def get_infer_dataloader(data_path, feature_type, rescale_factor, batch_size, num_workers): | ||
173 | +# dataset = FeatureDataset(data_path, feature_type, rescale_factor, True) | ||
174 | +# data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False, | ||
175 | +# num_workers=num_workers, pin_memory=False) | ||
176 | +# return data_loader | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
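
The commented-out `FeatureDataset` above splits each packed feature image into a 16x16 grid of tiles, degrades every tile with a bicubic down/up round trip, and stitches the grid back together. A minimal sketch of that round trip with PIL and NumPy, assuming a packed image whose width and height are divisible by 16 (the function name is hypothetical):

```python
import numpy as np
from PIL import Image

def bicubic_roundtrip_grid(packed_img: Image.Image, rescale_factor: int, grid: int = 16) -> np.ndarray:
    """Degrade a packed feature image tile by tile (bicubic down/up) and re-assemble it."""
    w, h = packed_img.size
    tile_w, tile_h = w // grid, h // grid
    rows = []
    for j in range(grid):                  # rows of tiles
        row_tiles = []
        for i in range(grid):              # columns of tiles
            box = (i * tile_w, j * tile_h, (i + 1) * tile_w, (j + 1) * tile_h)
            tile = packed_img.crop(box)
            tile = tile.resize((tile_w // rescale_factor, tile_h // rescale_factor), Image.BICUBIC)
            tile = tile.resize((tile_w, tile_h), Image.BICUBIC)
            row_tiles.append(np.array(tile).astype(float))
        rows.append(np.hstack(row_tiles))  # stitch one row of tiles
    return np.vstack(rows)                 # stack rows into the full packed image
```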
code/vdsr/not_used/make_dataset.py
0 → 100644
1 | +''' | ||
2 | +Not currently in use. | ||
3 | +''' | ||
4 | + | ||
5 | + | ||
6 | +# from crop_feature import crop_feature | ||
7 | +# import os | ||
8 | +# from PIL import Image | ||
9 | +# import numpy as np | ||
10 | +# import torch | ||
11 | +# from torch.utils.data.dataset import Dataset | ||
12 | +# from torch.utils.data import TensorDataset, DataLoader | ||
13 | +# from torchvision import transforms | ||
14 | +# import cv2 | ||
15 | +# import glob | ||
16 | +# import h5py | ||
17 | +# import argparse | ||
18 | + | ||
19 | +# parser = argparse.ArgumentParser(description="make Dataset") | ||
20 | +# parser.add_argument("--dataset", type=str) | ||
21 | +# parser.add_argument("--featureType", type=str) | ||
22 | +# parser.add_argument("--scaleFactor", type=int) | ||
23 | +# parser.add_argument("--batchSize", type=int, default=16) | ||
24 | +# parser.add_argument("--threads", type=int, default=3) | ||
25 | + | ||
26 | +# # dataset, feature_type, scale_factor, batch_size, num_workers | ||
27 | +# def main(): | ||
28 | +# opt = parser.parse_args() | ||
29 | + | ||
30 | +# dataset = opt.dataset | ||
31 | +# feature_type = opt.featureType | ||
32 | +# scale_factor = opt.scaleFactor | ||
33 | +# batch_size = opt.batchSize | ||
34 | +# num_workers = opt.threads | ||
35 | + | ||
36 | +# print_message = True | ||
37 | +# dataset = dataset+"/LR_2" | ||
38 | +# image_path = os.path.join(dataset, feature_type) | ||
39 | +# image_list = os.listdir(image_path) | ||
40 | +# input = list() | ||
41 | +# label = list() | ||
42 | + | ||
43 | +# for image in image_list: | ||
44 | +# origin_image = Image.open(os.path.join(image_path,image)) | ||
45 | +# label.append(np.array(origin_image).astype(float)) | ||
46 | +# image_cropped = crop_feature(os.path.join(image_path, image), feature_type, scale_factor, print_message) | ||
47 | +# print_message = False | ||
48 | +# # bicubic interpolation | ||
49 | +# reconstructed_features = list() | ||
50 | +# print("crop is done.") | ||
51 | +# for crop in image_cropped: | ||
52 | +# w, h = crop.size | ||
53 | +# bicubic_interpolated_image = crop.resize((w//scale_factor, h//scale_factor), Image.BICUBIC) | ||
54 | +# bicubic_interpolated_image = bicubic_interpolated_image.resize((w,h), Image.BICUBIC) # upscale back to the original size | ||
55 | +# reconstructed_features.append(np.array(bicubic_interpolated_image).astype(float)) | ||
56 | +# input.append(concatFeatures(reconstructed_features, image, feature_type)) | ||
57 | + | ||
58 | +# print("concat is done.") | ||
59 | +# if len(input) == len(label): | ||
60 | +# save_h5(input, label, 'data/train_{}.h5'.format(feature_type)) | ||
61 | +# print("saved..") | ||
62 | +# else: | ||
63 | +# print(len(input), len(label), "do not match.") | ||
64 | + | ||
65 | + | ||
66 | +# def concatFeatures(features, image_name, feature_type): | ||
67 | +# features_0 = features[:16] | ||
68 | +# features_1 = features[16:32] | ||
69 | +# features_2 = features[32:48] | ||
70 | +# features_3 = features[48:64] | ||
71 | +# features_4 = features[64:80] | ||
72 | +# features_5 = features[80:96] | ||
73 | +# features_6 = features[96:112] | ||
74 | +# features_7 = features[112:128] | ||
75 | +# features_8 = features[128:144] | ||
76 | +# features_9 = features[144:160] | ||
77 | +# features_10 = features[160:176] | ||
78 | +# features_11 = features[176:192] | ||
79 | +# features_12 = features[192:208] | ||
80 | +# features_13 = features[208:224] | ||
81 | +# features_14 = features[224:240] | ||
82 | +# features_15 = features[240:256] | ||
83 | + | ||
84 | +# features_new = list() | ||
85 | +# features_new.extend([ | ||
86 | +# concat_vertical(features_0), | ||
87 | +# concat_vertical(features_1), | ||
88 | +# concat_vertical(features_2), | ||
89 | +# concat_vertical(features_3), | ||
90 | +# concat_vertical(features_4), | ||
91 | +# concat_vertical(features_5), | ||
92 | +# concat_vertical(features_6), | ||
93 | +# concat_vertical(features_7), | ||
94 | +# concat_vertical(features_8), | ||
95 | +# concat_vertical(features_9), | ||
96 | +# concat_vertical(features_10), | ||
97 | +# concat_vertical(features_11), | ||
98 | +# concat_vertical(features_12), | ||
99 | +# concat_vertical(features_13), | ||
100 | +# concat_vertical(features_14), | ||
101 | +# concat_vertical(features_15) | ||
102 | +# ]) | ||
103 | + | ||
104 | +# final_concat_feature = concat_horizontal(features_new) | ||
105 | + | ||
106 | +# save_path = "features/LR_2/" + feature_type + "/" + image_name | ||
107 | +# if not os.path.exists("features/"): | ||
108 | +# os.makedirs("features/") | ||
109 | +# if not os.path.exists("features/LR_2/"): | ||
110 | +# os.makedirs("features/LR_2/") | ||
111 | +# if not os.path.exists("features/LR_2/" + feature_type): | ||
112 | +# os.makedirs("features/LR_2/" + feature_type) | ||
113 | +# cv2.imwrite(save_path, final_concat_feature) | ||
114 | + | ||
115 | +# return np.array(final_concat_feature).astype(float) | ||
116 | + | ||
117 | +# def concat_horizontal(feature): | ||
118 | +# result = cv2.hconcat([feature[0], feature[1]]) | ||
119 | +# for i in range(2, len(feature)): | ||
120 | +# result = cv2.hconcat([result, feature[i]]) | ||
121 | +# return result | ||
122 | + | ||
123 | +# def concat_vertical(feature): | ||
124 | +# result = cv2.vconcat([feature[0], feature[1]]) | ||
125 | +# for i in range(2, len(feature)): | ||
126 | +# result = cv2.vconcat([result, feature[i]]) | ||
127 | +# return result | ||
128 | + | ||
129 | +# def save_h5(sub_ip, sub_la, savepath): | ||
130 | +# if not os.path.exists("data/"): | ||
131 | +# os.makedirs("data/") | ||
132 | + | ||
133 | +# path = os.path.join(os.getcwd(), savepath) | ||
134 | +# with h5py.File(path, 'w') as hf: | ||
135 | +# hf.create_dataset('input', data=sub_ip) | ||
136 | +# hf.create_dataset('label', data=sub_la) | ||
137 | + | ||
138 | +# if __name__ == "__main__": | ||
139 | +# main() | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
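
`make_dataset.py` ends by packing the degraded inputs and the original labels into one HDF5 file via `save_h5`, which is the layout `Read_dataset_h5` reads back. A minimal sketch of that write path, assuming equally shaped (N, C, H, W) float arrays and a hypothetical output path:

```python
import os
import h5py
import numpy as np

def save_h5(inputs, labels, savepath="data/train_p2.h5"):
    """Write paired input/label arrays to an HDF5 file using the dataset
    names that Read_dataset_h5 expects ('input' and 'label')."""
    out_dir = os.path.dirname(savepath)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with h5py.File(savepath, "w") as hf:
        hf.create_dataset("input", data=np.asarray(inputs, dtype=np.float32))
        hf.create_dataset("label", data=np.asarray(labels, dtype=np.float32))
```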
code/vdsr/outputs/p2/HR_p2.png
0 → 100644

7.26 MB
code/vdsr/outputs/p2/LR_x3_p2.png
0 → 100644

4.71 MB
code/vdsr/outputs/p2/SR_x3_p2.png
0 → 100644

5.12 MB
code/vdsr/outputs/p3/HR_p3.png
0 → 100644

1.83 MB
code/vdsr/outputs/p3/LR_x3_p3.png
0 → 100644

1.41 MB
code/vdsr/outputs/p3/SR_x3_p3.png
0 → 100644

1.41 MB
code/vdsr/outputs/p4/HR_p4.png
0 → 100644

466 KB
code/vdsr/outputs/p4/LR_x3_p4.png
0 → 100644

363 KB
code/vdsr/outputs/p4/SR_x3_p4.png
0 → 100644

363 KB
최종보고서/발표ppt_2017103084_서민정.pptx
0 → 100644
No preview for this file type
최종보고서/최종보고서.docx
0 → 100644
No preview for this file type