장윤호

init

Showing 106 changed files with 2345 additions and 0 deletions
1 +# code in this file is adapted from rpmcruz/autoaugment
2 +# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
3 +import random
4 +
5 +import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
6 +import numpy as np
7 +import torch
8 +from PIL import Image
9 +
10 +
11 +def ShearX(img, v): # [-0.3, 0.3]
12 + assert -0.3 <= v <= 0.3
13 + if random.random() > 0.5:
14 + v = -v
15 + return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
16 +
17 +
18 +def ShearY(img, v): # [-0.3, 0.3]
19 + assert -0.3 <= v <= 0.3
20 + if random.random() > 0.5:
21 + v = -v
22 + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
23 +
24 +
25 +def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
26 + assert -0.45 <= v <= 0.45
27 + if random.random() > 0.5:
28 + v = -v
29 + v = v * img.size[0]
30 + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
31 +
32 +
33 +def TranslateXabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
34 + assert 0 <= v
35 + if random.random() > 0.5:
36 + v = -v
37 + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
38 +
39 +
40 +def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
41 + assert -0.45 <= v <= 0.45
42 + if random.random() > 0.5:
43 + v = -v
44 + v = v * img.size[1]
45 + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
46 +
47 +
48 +def TranslateYabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
49 + assert 0 <= v
50 + if random.random() > 0.5:
51 + v = -v
52 + return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
53 +
54 +
55 +def Rotate(img, v): # [-30, 30]
56 + assert -30 <= v <= 30
57 + if random.random() > 0.5:
58 + v = -v
59 + return img.rotate(v)
60 +
61 +
62 +def AutoContrast(img, _):
63 + return PIL.ImageOps.autocontrast(img)
64 +
65 +
66 +def Invert(img, _):
67 + return PIL.ImageOps.invert(img)
68 +
69 +
70 +def Equalize(img, _):
71 + return PIL.ImageOps.equalize(img)
72 +
73 +
74 +def Flip(img, _): # not from the paper
75 + return PIL.ImageOps.mirror(img)
76 +
77 +
78 +def Solarize(img, v): # [0, 256]
79 + assert 0 <= v <= 256
80 + return PIL.ImageOps.solarize(img, v)
81 +
82 +
83 +def SolarizeAdd(img, addition=0, threshold=128):
84 + img_np = np.array(img).astype(int) # np.int was removed from NumPy; plain int keeps the same behavior
85 + img_np = img_np + addition
86 + img_np = np.clip(img_np, 0, 255)
87 + img_np = img_np.astype(np.uint8)
88 + img = Image.fromarray(img_np)
89 + return PIL.ImageOps.solarize(img, threshold)
90 +
91 +
92 +def Posterize(img, v): # [4, 8]
93 + v = int(v)
94 + v = max(1, v)
95 + return PIL.ImageOps.posterize(img, v)
96 +
97 +
98 +def Contrast(img, v): # [0.1,1.9]
99 + assert 0.1 <= v <= 1.9
100 + return PIL.ImageEnhance.Contrast(img).enhance(v)
101 +
102 +
103 +def Color(img, v): # [0.1,1.9]
104 + assert 0.1 <= v <= 1.9
105 + return PIL.ImageEnhance.Color(img).enhance(v)
106 +
107 +
108 +def Brightness(img, v): # [0.1,1.9]
109 + assert 0.1 <= v <= 1.9
110 + return PIL.ImageEnhance.Brightness(img).enhance(v)
111 +
112 +
113 +def Sharpness(img, v): # [0.1,1.9]
114 + assert 0.1 <= v <= 1.9
115 + return PIL.ImageEnhance.Sharpness(img).enhance(v)
116 +
117 +
118 +def Cutout(img, v): # [0, 60] => percentage: [0, 0.2]
119 + assert 0.0 <= v <= 0.2
120 + if v <= 0.:
121 + return img
122 +
123 + v = v * img.size[0]
124 + return CutoutAbs(img, v)
125 +
126 +
127 +def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]
128 + # assert 0 <= v <= 20
129 + if v < 0:
130 + return img
131 + w, h = img.size
132 + x0 = np.random.uniform(w)
133 + y0 = np.random.uniform(h)
134 +
135 + x0 = int(max(0, x0 - v / 2.))
136 + y0 = int(max(0, y0 - v / 2.))
137 + x1 = min(w, x0 + v)
138 + y1 = min(h, y0 + v)
139 +
140 + xy = (x0, y0, x1, y1)
141 + color = (125, 123, 114)
142 + # color = (0, 0, 0)
143 + img = img.copy()
144 + PIL.ImageDraw.Draw(img).rectangle(xy, color)
145 + return img
146 +
147 +
148 +def SamplePairing(imgs): # [0, 0.4]
149 + def f(img1, v):
150 + i = np.random.choice(len(imgs))
151 + img2 = PIL.Image.fromarray(imgs[i])
152 + return PIL.Image.blend(img1, img2, v)
153 +
154 + return f
155 +
156 +
157 +def Identity(img, v):
158 + return img
159 +
160 +
161 +def augment_list(): # 16 operations and their ranges
162 + # https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57
163 + # l = [
164 + # (Identity, 0., 1.0),
165 + # (ShearX, 0., 0.3), # 0
166 + # (ShearY, 0., 0.3), # 1
167 + # (TranslateX, 0., 0.33), # 2
168 + # (TranslateY, 0., 0.33), # 3
169 + # (Rotate, 0, 30), # 4
170 + # (AutoContrast, 0, 1), # 5
171 + # (Invert, 0, 1), # 6
172 + # (Equalize, 0, 1), # 7
173 + # (Solarize, 0, 110), # 8
174 + # (Posterize, 4, 8), # 9
175 + # # (Contrast, 0.1, 1.9), # 10
176 + # (Color, 0.1, 1.9), # 11
177 + # (Brightness, 0.1, 1.9), # 12
178 + # (Sharpness, 0.1, 1.9), # 13
179 + # # (Cutout, 0, 0.2), # 14
180 + # # (SamplePairing(imgs), 0, 0.4), # 15
181 + # ]
182 +
183 + # https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
184 + l = [
185 + (AutoContrast, 0, 1),
186 + (Equalize, 0, 1),
187 + (Invert, 0, 1),
188 + (Rotate, 0, 30),
189 + (Posterize, 0, 4),
190 + (Solarize, 0, 256),
191 + (SolarizeAdd, 0, 110),
192 + (Color, 0.1, 1.9),
193 + (Contrast, 0.1, 1.9),
194 + (Brightness, 0.1, 1.9),
195 + (Sharpness, 0.1, 1.9),
196 + (ShearX, 0., 0.3),
197 + (ShearY, 0., 0.3),
198 + (CutoutAbs, 0, 40),
199 + (TranslateXabs, 0., 100),
200 + (TranslateYabs, 0., 100),
201 + ]
202 +
203 + return l
204 +
205 +
206 +class Lighting(object):
207 + """Lighting noise(AlexNet - style PCA - based noise)"""
208 +
209 + def __init__(self, alphastd, eigval, eigvec):
210 + self.alphastd = alphastd
211 + self.eigval = torch.Tensor(eigval)
212 + self.eigvec = torch.Tensor(eigvec)
213 +
214 + def __call__(self, img):
215 + if self.alphastd == 0:
216 + return img
217 +
218 + alpha = img.new().resize_(3).normal_(0, self.alphastd)
219 + rgb = self.eigvec.type_as(img).clone() \
220 + .mul(alpha.view(1, 3).expand(3, 3)) \
221 + .mul(self.eigval.view(1, 3).expand(3, 3)) \
222 + .sum(1).squeeze()
223 +
224 + return img.add(rgb.view(3, 1, 1).expand_as(img))
225 +
226 +
227 +class CutoutDefault(object):
228 + """
229 + Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
230 + """
231 + def __init__(self, length):
232 + self.length = length
233 +
234 + def __call__(self, img):
235 + h, w = img.size(1), img.size(2)
236 + mask = np.ones((h, w), np.float32)
237 + y = np.random.randint(h)
238 + x = np.random.randint(w)
239 +
240 + y1 = np.clip(y - self.length // 2, 0, h)
241 + y2 = np.clip(y + self.length // 2, 0, h)
242 + x1 = np.clip(x - self.length // 2, 0, w)
243 + x2 = np.clip(x + self.length // 2, 0, w)
244 +
245 + mask[y1: y2, x1: x2] = 0.
246 + mask = torch.from_numpy(mask)
247 + mask = mask.expand_as(img)
248 + img *= mask
249 + return img
250 +
251 +
252 +class RandAugment:
253 + def __init__(self, n, m):
254 + self.n = n # number of augmentation ops applied to each image
255 + self.m = m # magnitude in [0, 30]
256 + self.augment_list = augment_list()
257 +
258 + def __call__(self, img):
259 + ops = random.choices(self.augment_list, k=self.n)
260 + for op, minval, maxval in ops:
261 + val = (float(self.m) / 30) * float(maxval - minval) + minval
262 + img = op(img, val)
263 + return img
\ No newline at end of file
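
A minimal sketch of how the RandAugment class above can be wired into a torchvision transform chain. The n/m values and the dataset path are illustrative only (the path mirrors the configs later in this commit), and the sketch assumes RandAugment and its helper functions are in scope:

import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder

# n=2 ops per image, magnitude m=9 on the fixed 0-30 scale used in __call__
train_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    RandAugment(2, 9),
    transforms.ToTensor(),
])
dataset = ImageFolder("../data/Fifth_data/All", transform=train_transform)  # illustrative path
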
1 +task: All
2 +modelname: MobilenetV3
3 +output: output
4 +checkpoint: "output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar"
5 +gpu: [2]
6 +data:
7 + train: ../data/Fifth_data/All
8 + val: ../data/Fifth_data/All
9 + test: ../data/Fifth_data/All
10 +train:
11 + epochs: 3000
12 + start-epoch: 0
13 + batch-size: 256
14 + worker: 16
15 + resume: ''
16 + augment: True
17 + size: 224
18 + confidence: False
19 + weight: [1., 1., 1., 1., 1., 1., 1., 1.] #Crack, Double, Empty, Flip, Leave, Normal, Pollute, Scratch
20 +predict:
21 + batch-size: 256
22 + worker: 16
23 + cam: False
24 + normalize: True
25 + save: False
26 +optimizer:
27 + type: 'Adam'
28 + lr: 0.001
29 + momentum: 0.9
30 + weight_decay: 0.0001
31 +loss:
32 + gamma: 2.
33 + alpha: 0.8
34 +model:
35 + blocks: 6
36 + class: 8
37 +etc:
38 + tensorboard: False
39 + print_freq: 10
\ No newline at end of file
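
A short, non-prescriptive sketch of reading a config like the one above with PyYAML; configs/All_config.yml is the filename the threshold script later in this commit loads, and hyphenated keys such as batch-size need bracket access:

import yaml

# safe_load parses the YAML without executing arbitrary tags
with open("configs/All_config.yml") as f:
    conf = yaml.safe_load(f)

print(conf["modelname"], conf["model"]["blocks"], conf["model"]["class"])
print(conf["train"]["batch-size"], conf["optimizer"]["type"], conf["optimizer"]["lr"])
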
1 +task: Type
2 +modelname: MobilenetV3
3 +output: output
4 +checkpoint: "output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar"
5 +gpu: [1]
6 +data:
7 + train: ../data/Fifth_data/ErrorType
8 + val: ../data/Fifth_data/ErrorType
9 + test: ../data/Fifth_data/ErrorType
10 +train:
11 + epochs: 3000
12 + start-epoch: 0
13 + batch-size: 256
14 + worker: 16
15 + resume: ''
16 + augment: True
17 + size: 64
18 + confidence: False
19 + weight: [1., 1., 1., 1., 1., 1., 1.] #Crack, Double, Empty, Flip, Leave, Pollute, Scratch
20 +predict:
21 + batch-size: 256
22 + worker: 16
23 + cam: False
24 + normalize: True
25 + save: False
26 +optimizer:
27 + type: 'SGD'
28 + lr: 0.1
29 + momentum: 0.9
30 + weight_decay: 0.0001
31 +loss:
32 + gamma: 2.
33 + alpha: 0.8
34 +model:
35 + blocks: 4
36 + class: 7
37 +etc:
38 + tensorboard: False
39 + print_freq: 10
\ No newline at end of file
1 +task: Error
2 +modelname: MobilenetV3
3 +output: output
4 +checkpoint: "output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar"
5 +gpu: [1]
6 +data:
7 + train: ../data/Fifth_data/Error
8 + val: ../data/Fifth_data/Error
9 + test: ../data/Fifth_data/Error
10 +train:
11 + epochs: 3000
12 + start-epoch: 0
13 + batch-size: 256
14 + worker: 16
15 + resume: ''
16 + augment: True
17 + size: 64
18 + confidence: False
19 + weight: [1., 1.] #Error , Normal
20 +predict:
21 + batch-size: 256
22 + worker: 16
23 + cam: False
24 + cam-class: "Error"
25 + normalize: True
26 + save: False
27 +optimizer:
28 + type: 'SGD'
29 + lr: 0.1
30 + momentum: 0.9
31 + weight_decay: 0.0001
32 +loss:
33 + gamma: 2.
34 + alpha: 0.8
35 +model:
36 + blocks: 4
37 + class: 2
38 +etc:
39 + tensorboard: False
40 + print_freq: 10
\ No newline at end of file
1 +import torch
2 +import torchvision
3 +import torch.nn as nn
4 +import argparse
5 +from model import AutoEncoder, pytorch_autoencoder
6 +from get_mean_std import get_params
7 +from torchvision.utils import save_image
8 +
9 +
10 +parser = argparse.ArgumentParser(description='Process autoencoder')
11 +parser.add_argument('--config', type=str, help='select type')
12 +args = parser.parse_args()
13 +
14 +# Dataset containing only Scratch samples
15 +data_path = "../data/Fourth_data/Auto_test"
16 +checkpoint_path = "./dc_img/checkpoint.pth"
17 +resize_size = 128
18 +batch_size = 128
19 +
20 +# The autoencoder built by following the report, and the autoencoder provided by the PyTorch example
21 +if args.config == "my":
22 + model = AutoEncoder().cuda("cuda:1")
23 +else:
24 + model = pytorch_autoencoder().cuda("cuda:1")
25 +
26 +checkpoint = torch.load(checkpoint_path)
27 +model.load_state_dict(checkpoint)
28 +print("checkpoint loaded finish!")
29 +
30 +img_transform = torchvision.transforms.Compose([
31 + torchvision.transforms.Resize((resize_size, resize_size)),
32 + torchvision.transforms.Grayscale(),
33 + torchvision.transforms.ToTensor(),
34 +])
35 +
36 +
37 +dataset = torchvision.datasets.ImageFolder(data_path, transform=img_transform)
38 +dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False)
39 +criterion = nn.L1Loss()
40 +
41 +for idx, data in enumerate(dataloader):
42 + img, _ = data
43 + img = img.cuda("cuda:1")
44 + output = model(img)
45 +
46 + save_image(output, f'./dc_img/test_output_{idx}.png')
47 +
48 + loss = criterion(output, img)
49 +
50 + img = img - output
51 +
52 + save_image(img, f'./dc_img/scratch_dif_{idx}.png')
53 +
54 +print(f"loss : {loss}")
\ No newline at end of file
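
The loop above prints only the L1 loss of the final batch. A hedged sketch of a per-image reconstruction error, which is the quantity one would normally threshold to flag anomalous (e.g. Scratch) samples; it reuses the variable names from the loop:

# inside the loop, after output = model(img):
per_image_err = (output - img).abs().mean(dim=(1, 2, 3))  # mean absolute error per image, shape (batch,)
for j, err in enumerate(per_image_err.tolist()):
    print(f"batch {idx} image {j} reconstruction error: {err:.4f}")
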
1 +import torch
2 +import torchvision
3 +import torch.nn as nn
4 +import argparse
5 +from model import AutoEncoder, pytorch_autoencoder, AutoEncoder_s
6 +from get_mean_std import get_params
7 +from torchvision.utils import save_image
8 +
9 +parser = argparse.ArgumentParser(description='Process autoencoder')
10 +parser.add_argument('--config', type=str, help='select type')
11 +args = parser.parse_args()
12 +
13 +# Dataset containing only Normal samples
14 +data_path = "../data/Fourth_data/Auto"
15 +resize_size = 128
16 +num_epochs = 100
17 +batch_size = 128
18 +learning_rate = 1e-3
19 +
20 +# The autoencoder built by following the report, and the autoencoder provided by the PyTorch example
21 +if args.config == "my":
22 + model = AutoEncoder().cuda("cuda:1")
23 +elif args.config == "pytorch":
24 + model = pytorch_autoencoder().cuda("cuda:1")
25 +else:
26 + model = AutoEncoder_s().cuda("cuda:1")
27 +
28 +print(model)
29 +#mean, std = get_params(data_path, resize_size)
30 +
31 +img_transform = torchvision.transforms.Compose([
32 + torchvision.transforms.Resize((resize_size, resize_size)),
33 + torchvision.transforms.Grayscale(),
34 + torchvision.transforms.ToTensor(),
35 +])
36 +
37 +dataset = torchvision.datasets.ImageFolder(data_path, transform=img_transform)
38 +dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
39 +
40 +criterion = nn.L1Loss()
41 +optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
42 +
43 +for epoch in range(num_epochs):
44 + for data in dataloader:
45 + img, _ = data
46 + img = img.cuda("cuda:1")
47 + output = model(img)
48 + loss = criterion(output, img)
49 +
50 + optimizer.zero_grad()
51 + loss.backward()
52 + optimizer.step()
53 +
54 + print('epoch [{}/{}], loss:{:.4f}'.format(epoch+1, num_epochs, loss.item()))
55 +
56 + if epoch % 10 ==0:
57 + save_image(output, './dc_img/image_{}.png'.format(epoch))
58 +
59 +torch.save(model.state_dict(), './dc_img/checkpoint.pth')
\ No newline at end of file
1 +import torch
2 +import torch.nn as nn
3 +import os
4 +import shutil
5 +import logging
6 +from model import mobilenetv3
7 +from utils import get_args_from_yaml
8 +import torchvision.datasets as datasets
9 +from utils import AverageMeter, accuracy, printlog, precision, recall
10 +import torchvision.transforms as transforms
11 +from torch.utils.data.sampler import SubsetRandomSampler
12 +import numpy as np
13 +import time
14 +from get_mean_std import get_params
15 +
16 +model = mobilenetv3(n_class=7, blocknum=6, dropout=0.5)
17 +model = model.train()
18 +data_path = "../data/All"
19 +check_path = "output/All/30114_model=MobilenetV3-ep=3000-block=6-class=8/model_best.pth.tar"
20 +validation_ratio = 0.1
21 +random_seed = 10
22 +gpus=[0]
23 +epochs = 3000
24 +resize_size=128
25 +
26 +logger = logging.getLogger()
27 +logger.setLevel(logging.INFO)
28 +streamHandler = logging.StreamHandler()
29 +logger.addHandler(streamHandler)
30 +
31 +fileHandler = logging.FileHandler("logs/finetune.log")
32 +logger.addHandler(fileHandler)
33 +
34 +
35 +def save_checkpoint(state, is_best, block =6, filename='checkpoint.pth.tar'):
36 + """Saves checkpoint to disk"""
37 + directory = "%s/%s/" % ('output', 'All')
38 + if not os.path.exists(directory):
39 + os.makedirs(directory)
40 + filename = directory + filename
41 + torch.save(state, filename)
42 + logger.info(f"Checkpoint Saved: {filename}")
43 + best_filename = f"output/All/model_best.pth.tar"
44 + if is_best:
45 + shutil.copyfile(filename, best_filename)
46 + logger.info(f"New Best Checkpoint saved: {best_filename}")
47 +
48 + return best_filename
49 +
50 +def validate(val_loader, model, criterion, epoch, q=None):
51 + """Perform validaadd_model_to_queuetion on the validation set"""
52 + with torch.no_grad():
53 + batch_time = AverageMeter()
54 + losses = AverageMeter()
55 + top1 = AverageMeter()
56 + prec = []
57 + rec = []
58 +
59 + for i in range(7):
60 + prec.append(AverageMeter())
61 + rec.append(AverageMeter())
62 + # switch to evaluate mode
63 + model.eval()
64 + end = time.time()
65 +
66 + for i, (input, target) in enumerate(val_loader):
67 + if torch.cuda.is_available():
68 + target = target.cuda()
69 + input = input.cuda()
70 +
71 + # compute output
72 + output = model(input)
73 + loss = criterion(output, target)
74 +
75 + # measure accuracy and record loss
76 + prec1 = accuracy(output.data, target, topk=(1,))[0]
77 +
78 + losses.update(loss.item(), input.size(0))
79 + top1.update(prec1.item(), input.size(0))
80 +
81 + for k in range(7):
82 + prec[k].update(precision(output.data, target, target_class=k), input.size(0))
83 + rec[k].update(recall(output.data, target, target_class=k), input.size(0))
84 +
85 + # measure elapsed time
86 + batch_time.update(time.time() - end)
87 + end = time.time()
88 +
89 + if i % 10 == 0:
90 + logger.info('Test: [{0}/{1}]\t'
91 + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
92 + 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
93 + 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'
94 + .format(
95 + i, len(val_loader), batch_time=batch_time, loss=losses,
96 + top1=top1))
97 +
98 + printlog(' * epoch: {epoch} Prec@1 {top1.avg:.3f}'.format(epoch=epoch,top1=top1), logger, q)
99 +
100 + return top1.avg, prec, rec
101 +
102 +
103 +def train(model, train_loader, criterion, optimizer, epoch):
104 + batch_time = AverageMeter()
105 + losses = AverageMeter()
106 + top1 = AverageMeter()
107 + prec = AverageMeter()
108 +
109 + # switch to train mode
110 + model.train()
111 + end = time.time()
112 +
113 + for i, (input, target) in enumerate(train_loader):
114 + if torch.cuda.is_available():
115 + target = target.cuda()
116 + input = input.cuda()
117 + # compute output
118 + output = model(input)
119 + loss = criterion(output, target)
120 + # measure accuracy and record loss
121 + prec1 = accuracy(output, target, topk=(1,))[0]
122 +
123 + losses.update(loss.item(), input.size(0))
124 + top1.update(prec1.item(), input.size(0))
125 +
126 + # compute gradient and do SGD step
127 + optimizer.zero_grad()
128 + loss.backward()
129 + optimizer.step()
130 +
131 + # measure elapsed time
132 + batch_time.update(time.time() - end)
133 + end = time.time()
134 +
135 + if i % 10 == 0:
136 + logger.info('Epoch: [{0}][{1}/{2}]\t'
137 + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
138 + 'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
139 + 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
140 + .format(
141 + epoch, i, len(train_loader), batch_time=batch_time,
142 + loss=losses, top1=top1))
143 +
144 +for idx, (name, module) in enumerate(model.named_modules()):
145 + if(idx < 62):
146 + for param in module.parameters():
147 + param.requires_grad = False
148 + else:
149 + for param in module.parameters():
150 + param.requires_grad = True
151 +
152 +mean, std = get_params(data_path, resize_size)
153 +normalize = transforms.Normalize(mean=[mean[0].item()],
154 + std=[std[0].item()])
155 +
156 +transform_train = transforms.Compose([
157 + transforms.Resize((resize_size, resize_size)), # resize width and height
158 + transforms.ColorJitter(0.2,0.2,0.2), # adjust brightness, contrast and saturation
159 + transforms.RandomRotation(2), # rotate by -2 to 2 degrees
160 + transforms.RandomAffine(5), # affine transform (e.g. skew into a parallelogram or trapezoid)
161 + transforms.RandomCrop(resize_size, padding=2), # pad by 2 on every side, then random-crop back to resize_size
162 + transforms.RandomHorizontalFlip(), # random left-right flip
163 + transforms.Grayscale(),
164 + transforms.ToTensor(),
165 + normalize
166 + ])
167 +
168 +transform_test = transforms.Compose([
169 + transforms.Resize((resize_size, resize_size)),
170 + transforms.Grayscale(),
171 + transforms.ToTensor(),
172 + normalize
173 +])
174 +
175 +kwargs = {'num_workers': 16, 'pin_memory': True}
176 +
177 +train_data = datasets.ImageFolder(data_path, transform_train)
178 +val_data = datasets.ImageFolder(data_path,transform_test)
179 +
180 +
181 +num_train = len(train_data)
182 +indices = list(range(num_train))
183 +split = int(np.floor(validation_ratio * num_train))
184 +
185 +# Set the random seed (it is 10 for both train and test, so both runs see the same split)
186 +np.random.seed(random_seed)
187 +np.random.shuffle(indices)
188 +
189 +# Split into train and validation sets.
190 +train_idx, valid_idx = indices[split:], indices[:split]
191 +train_sampler = SubsetRandomSampler(train_idx)
192 +valid_sampler = SubsetRandomSampler(valid_idx)
193 +
194 +train_loader = torch.utils.data.DataLoader(
195 + train_data, batch_size=256, sampler=train_sampler, #shuffle = True
196 + **kwargs)
197 +val_loader = torch.utils.data.DataLoader(
198 + val_data, batch_size=256, sampler=valid_sampler, #shuffle = False
199 + **kwargs)
200 +
201 +criterion = nn.CrossEntropyLoss()
202 +optimizer = torch.optim.Adam(model.parameters(), 0.0001, weight_decay=0.0001)
203 +
204 +if torch.cuda.is_available():
205 + torch.cuda.set_device(gpus[0])
206 + with torch.cuda.device(gpus[0]):
207 + model = model.cuda()
208 + criterion = criterion.cuda()
209 + model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])
210 +
211 +checkpoint = torch.load(check_path)
212 +
213 +pretrained_dict = checkpoint['state_dict']
214 +new_model_dict = model.state_dict()
215 +for k, v in pretrained_dict.items():
216 + if 'classifier' in k:
217 + continue
218 + new_model_dict.update({k : v})
219 +model.load_state_dict(new_model_dict)
220 +
221 +#model.load_state_dict(checkpoint['state_dict'], strict=False)
222 +best_prec1 = checkpoint['best_prec1']
223 +
224 +for epoch in range(epochs):
225 + train(model, train_loader, criterion, optimizer, epoch)
226 +
227 + prec1, prec, rec = validate(val_loader, model, criterion, epoch)
228 +
229 + is_best = prec1 >= best_prec1
230 +
231 + best_prec1 = max(prec1, best_prec1)
232 +
233 + checkpoint = save_checkpoint({
234 + 'epoch': epoch + 1,
235 + 'state_dict': model.state_dict(),
236 + 'best_prec1': best_prec1,
237 + }, is_best)
238 +
239 +
240 +for i in range(len(prec)):
241 + logger.info(' * Precision {prec.avg:.3f}'.format(prec=prec[i]))
242 + logger.info(' * recall {rec.avg:.3f}'.format(rec=rec[i]))
\ No newline at end of file
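
A small sanity-check sketch for the freeze loop in the fine-tuning script above (the module-index cutoff of 62 is taken as given); it can be dropped in right after that loop:

# how many parameters did the requires_grad loop actually freeze?
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
frozen = sum(p.numel() for p in model.parameters() if not p.requires_grad)
print(f"trainable: {trainable}  frozen: {frozen}")

# optionally hand only the unfrozen parameters to the optimizer; passing
# model.parameters() as the script does also works, frozen tensors simply receive no gradient
optimizer = torch.optim.Adam(
    (p for p in model.parameters() if p.requires_grad), 0.0001, weight_decay=0.0001)
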
1 +import torch
2 +import torch.nn as nn
3 +import torch.nn.functional as F
4 +from torch.autograd import Variable
5 +
6 +class FocalLoss(nn.Module):
7 + def __init__(self, gamma=0, alpha=None, size_average=True):
8 + super(FocalLoss, self).__init__()
9 + self.gamma = gamma
10 + self.alpha = alpha
11 + if isinstance(alpha,(float,int)): self.alpha = torch.Tensor([alpha,1-alpha])
12 + if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)
13 + self.size_average = size_average
14 +
15 + def forward(self, input, target):
16 + if input.dim() > 2:
17 + input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W
18 + input = input.transpose(1,2) # N,C,H*W => N,H*W,C
19 + input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C
20 + target = target.view(-1,1)
21 +
22 + logpt = F.log_softmax(input, dim=1)
23 + logpt = logpt.gather(1,target)
24 + logpt = logpt.view(-1)
25 + pt = Variable(logpt.data.exp())
26 + #pt = logpt.data.exp() #pt = (256), input = (256,5), target = (256,1)
27 + # pt is exp(logpt)
28 +
29 + if self.alpha is not None:
30 + self.alpha = self.alpha.cuda() # move class weights to the GPU only when alpha was given
31 + if self.alpha.type()!=input.data.type():
32 + self.alpha = self.alpha.type_as(input.data)
33 + at = self.alpha.gather(0,target.data.view(-1))
34 + logpt = logpt * at
35 +
36 + loss = -1 * (1-pt)**self.gamma * logpt
37 +
38 + if self.size_average: return loss.mean()
39 + else: return loss.sum()
\ No newline at end of file
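
For reference, the loss above computes FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t). A minimal usage sketch with the gamma/alpha values used by the configs in this commit; it assumes a CUDA device, since forward() moves alpha onto the GPU:

import torch

criterion = FocalLoss(gamma=2.0, alpha=0.8)   # alpha=0.8 -> per-class weights [0.8, 0.2]
logits = torch.randn(4, 2).cuda()             # (batch, num_classes) raw model outputs
targets = torch.tensor([0, 1, 1, 0]).cuda()
loss = criterion(logits, targets)
print(loss.item())
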
1 +import torch.multiprocessing as mp
2 +import torch
3 +import torchvision.datasets as datasets
4 +import torchvision.transforms as transforms
5 +import argparse
6 +import numpy as np
7 +from get_mean_std import get_params
8 +from model import mobilenetv3
9 +import parmap
10 +
11 +
12 +# Image resize factor
13 +resize_size = 64
14 +
15 +# Number of classes.
16 +class_num = 7
17 +
18 +# Random seeds used for the checkpoints below.
19 +seeds = [39396, 2798, 3843, 62034, 8817, 65014, 45385]
20 +
21 +# Number of GPUs on this machine.
22 +gpu = 4
23 +
24 +# Saved checkpoints.
25 +checkpoints = [
26 + "output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
27 + "output/ErrorType/2798_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
28 + "output/ErrorType/3843_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
29 + "output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
30 + "output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
31 + "output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
32 + "output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar"
33 +]
34 +
35 +class AverageMeter(object):
36 + """Computes and stores the average and current value"""
37 +
38 + def __init__(self):
39 + self.reset()
40 +
41 + def reset(self):
42 + self.val = 0
43 + self.avg = 0
44 + self.sum = 0
45 + self.count = 0
46 +
47 + def update(self, val, n=1):
48 + self.val = val
49 + self.sum += val * n
50 + self.count += n
51 + self.avg = self.sum / self.count
52 +
53 +def accuracy(output, target, topk=(1,)):
54 + """Computes the precision@k for the specified values of k"""
55 + maxk = max(topk)
56 + batch_size = target.size(0)
57 +
58 + _, pred = output.topk(maxk, 1, True, True)
59 + pred = pred.t()
60 + correct = pred.eq(target.view(1, -1).expand_as(pred))
61 + res = []
62 + for k in topk:
63 + correct_k = correct[:k].view(-1).float().sum(0)
64 + res.append(correct_k.mul_(100.0 / batch_size))
65 + return res
66 +
67 +def get_models():
68 + models=[]
69 + for idx, checkpoint in enumerate(checkpoints):
70 + gpu_idx = idx % gpu
71 +
72 + weights = torch.load(checkpoint)
73 + model = mobilenetv3(n_class=class_num)
74 +
75 + torch.cuda.set_device(gpu_idx)
76 + with torch.cuda.device(gpu_idx):
77 + model = model.cuda()
78 +
79 + model = torch.nn.DataParallel(model, device_ids=[gpu_idx], output_device=gpu_idx)
80 + model.load_state_dict(weights['state_dict'])
81 +
82 + model.share_memory()
83 + models.append(model)
84 + return models
85 +
86 +def get_loader(path, resize_size):
87 + mean, std = get_params(path, resize_size)
88 + normalize = transforms.Normalize(mean=[mean[0].item()],
89 + std=[std[0].item()])
90 +
91 + transform = transforms.Compose([
92 + transforms.Resize((resize_size, resize_size)),
93 + transforms.Grayscale(),
94 + transforms.ToTensor(),
95 + normalize
96 + ])
97 + dataset = datasets.ImageFolder(path, transform) # use the path argument, not the global args
98 + kwargs = {'num_workers': 4, 'pin_memory': True}
99 +
100 + loader = torch.utils.data.DataLoader(dataset, batch_size=256, shuffle=False, **kwargs)
101 +
102 + return loader
103 +
104 +def get_data(processnum ,model, loader, return_dict):
105 + with torch.no_grad():
106 + top1 = AverageMeter()
107 + model.eval()
108 + gpu_idx = processnum % gpu
109 + for i, data in enumerate(loader):
110 + (input, target) = data
111 +
112 + target = target.cuda(gpu_idx)
113 + input = input.cuda(gpu_idx)
114 +
115 + output = model(input)
116 +
117 + prec1 = accuracy(output, target, topk=(1,))[0]
118 +
119 + top1.update(prec1.item(), input.size(0))
120 +
121 + return_dict[processnum] = top1.avg
122 +
123 +
124 +
125 +if __name__ == '__main__':
126 + mp.set_start_method('spawn')
127 + parser = argparse.ArgumentParser()
128 + parser.add_argument("--path", required=True, help="path")
129 + args = parser.parse_args()
130 +
131 + manager = mp.Manager()
132 + return_dict = manager.dict()
133 +
134 +
135 + # get one loader
136 + loader = get_loader(args.path, resize_size)
137 +
138 + # multi model with other checkpoint.
139 + models = get_models()
140 +
141 + # a single loader is shared by all processes (not one loader per model), which can cause errors
142 + processes = []
143 + for i, model in enumerate(models):
144 + p = mp.Process(target=get_data, args=(i, model, loader, return_dict))
145 + p.start()
146 + processes.append(p)
147 +
148 + for p in processes: p.join()
149 +
150 + for idx, seed in enumerate(seeds):
151 + print(f"process {idx}, seed {seed} : {return_dict[idx]}")
152 +
153 + print(f"total variance : {np.var(return_dict.values())}")
154 + #print(return_dict.values())
\ No newline at end of file
1 +import os
2 +import numpy as np
3 +import torch
4 +import torch.backends.cudnn as cudnn
5 +import torch.nn as nn
6 +import torchvision.models as models
7 +import torchvision.datasets as datasets
8 +import torchvision.transforms as transforms
9 +from torchvision.utils import save_image
10 +from PIL.ImageOps import grayscale
11 +from PIL import Image
12 +from torchvision.datasets import ImageFolder
13 +
14 +class MyDataset(ImageFolder):
15 + def __init__(self, root, transform):
16 + super(MyDataset, self).__init__(root, transform)
17 +
18 + def __getitem__(self, index):
19 + image, label = super(MyDataset, self).__getitem__(index)
20 + return image, label
21 +
22 +
23 +
24 +def get_params(path, resize_size):
25 + my_transform = transforms.Compose([
26 + transforms.Resize((resize_size,resize_size)),
27 + transforms.Grayscale(),
28 + transforms.ToTensor()
29 + ])
30 +
31 + my_dataset = MyDataset(path, my_transform)
32 +
33 + loader = torch.utils.data.DataLoader(
34 + my_dataset,
35 + batch_size=256,
36 + num_workers=8,
37 + shuffle=False
38 + )
39 +
40 + mean = 0.
41 + std = 0.
42 + nb_samples = 0.
43 + for i, (data, target) in enumerate(loader):
44 + batch_samples = data.size(0)
45 + data = data.view(batch_samples, data.size(1), -1)
46 + mean += data.mean(2).sum(0)
47 + std += data.std(2).sum(0)
48 + nb_samples += batch_samples
49 +
50 + mean /= nb_samples
51 + std /= nb_samples
52 + print(f"mean : {mean} , std : {std}")
53 + return mean, std
54 +
55 +"""
56 +my_transform = transforms.Compose([
57 +transforms.Resize((64,64)),
58 +transforms.ToTensor()
59 +])
60 +
61 +my_dataset = MyDataset("../data/Third_data/not_binary", my_transform)
62 +
63 +loader = torch.utils.data.DataLoader(
64 + my_dataset,
65 + batch_size=256,
66 + num_workers=8,
67 + shuffle=False
68 +)
69 +
70 +mean = 0.
71 +std = 0.
72 +nb_samples = 0.
73 +for i, (data, target) in enumerate(loader):
74 + batch_samples = data.size(0)
75 + data = data.view(batch_samples, data.size(1), -1)
76 + mean += data.mean(2).sum(0)
77 + std += data.std(2).sum(0)
78 + nb_samples += batch_samples
79 +
80 +mean /= nb_samples
81 +std /= nb_samples
82 +
83 +print(f"mean : {mean}, std : {std}")
84 +"""
\ No newline at end of file
1 +import os
2 +import time
3 +import sys
4 +import torch.nn.functional as F
5 +
6 +import numpy as np
7 +import PIL
8 +import torch
9 +import torch.backends.cudnn as cudnn
10 +import torch.nn as nn
11 +import torch.nn.parallel
12 +import torch.utils.data
13 +import torchvision.datasets as datasets
14 +import torchvision.transforms as transforms
15 +import yaml
16 +import cv2
17 +from get_mean_std import get_params
18 +sys.path.append(os.path.join(os.path.dirname(__file__))) # __file__, not __name__, so the script directory is added
19 +from model import mobilenetv3
20 +
21 +if not os.path.exists("threshold"):
22 + os.mkdir("threshold")
23 +
24 +thresholds = [.05, .1, .15, .2, .25, .3, .35, .4, .45, .5]
25 +
26 +for threshold in thresholds:
27 + if not os.path.exists(f"threshold/{threshold}"):
28 + os.mkdir(f"threshold/{threshold}")
29 +
30 +
31 +def get_args_from_yaml(file='trainer/configs/Error_config.yml'):
32 + with open(file) as f:
33 + conf = yaml.safe_load(f)
34 + return conf
35 +
36 +class MyImageFolder(datasets.ImageFolder):
37 + def __getitem__(self, index):
38 + # return image path
39 + return super(MyImageFolder, self).__getitem__(index), self.imgs[index]
40 +
41 +def main(args):
42 + run_model(args)
43 + print(f"[{args['id']}] done")
44 +
45 +def run_model(args):
46 + resize_size = args['train']['size']
47 +
48 + gpus = args['gpu']
49 +
50 + mean, std = get_params(args['data']['train'], resize_size)
51 +
52 + normalize = transforms.Normalize(mean=[mean[0].item()],
53 + std=[std[0].item()])
54 +
55 + normalize_factor = [mean, std]
56 +
57 + # data loader
58 + transform_test = transforms.Compose([
59 + transforms.Resize((resize_size,resize_size)),
60 + transforms.Grayscale(),
61 + transforms.ToTensor(),
62 + normalize
63 + ])
64 + kwargs = {'num_workers': args['predict']['worker'], 'pin_memory': True}
65 + test_data = MyImageFolder(args['data']['val'], transform_test)
66 + val_loader = torch.utils.data.DataLoader(
67 + test_data, batch_size=args['predict']['batch-size'], shuffle=False,
68 + **kwargs)
69 +
70 + # load model
71 + model = mobilenetv3(n_class= args['model']['class'], blocknum= args['model']['blocks'])
72 +
73 + torch.cuda.set_device(gpus[0])
74 + with torch.cuda.device(gpus[0]):
75 + model = model.cuda()
76 +
77 + model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])
78 +
79 + print("=> loading checkpoint '{}'".format(args['checkpoint']))
80 + checkpoint = torch.load(args['checkpoint'])
81 + model.load_state_dict(checkpoint['state_dict'])
82 + print("=> loaded checkpoint '{}' (epoch {})"
83 + .format(args['checkpoint'], checkpoint['epoch']))
84 + cudnn.benchmark = True
85 +
86 + extract_data(val_loader, model, normalize_factor, args)
87 +
88 +
89 +def extract_data(val_loader, model, normalize_factor, args):
90 + with torch.no_grad():
91 + # switch to evaluate mode
92 + model.eval()
93 + for data in(val_loader):
94 + (input, target), (path , _) = data
95 + target = target.cuda()
96 + input = input.cuda()
97 +
98 + output = model(input)
99 +
100 + print("save data!")
101 + save_data(output, target, path)
102 +
103 +class AverageMeter(object):
104 + def __init__(self):
105 + self.reset()
106 + def reset(self):
107 + self.val = 0
108 + self.avg = 0
109 + self.sum = 0
110 + self.count = 0
111 + def update(self, val, n=1):
112 + self.val = val
113 + self.sum += val * n
114 + self.count += n
115 + self.avg = self.sum / self.count
116 +
117 +
118 +def accuracy(output, target, topk=(1,)):
119 + """Computes the precision@k for the specified values of k"""
120 + maxk = max(topk)
121 + batch_size = target.size(0)
122 + _, pred = output.topk(maxk, 1, True, True)
123 + pred = pred.t()
124 + correct = pred.eq(target.view(1, -1).expand_as(pred))
125 + res = []
126 + for k in topk:
127 + correct_k = correct[:k].view(-1).float().sum(0)
128 + res.append(correct_k.mul_(100.0 / batch_size))
129 + return res
130 +
131 +def save_data(output, target, path):
132 + n_digits = 3
133 + prob = F.softmax(output, dim=1)
134 + prob = torch.round(prob * 10**n_digits) / (10**n_digits)
135 + for idx, p in enumerate(prob):
136 + value = torch.topk(p, 2).values
137 + indice = torch.topk(p,2).indices
138 +
139 + value = value.tolist()
140 + indice = indice.tolist()
141 +
142 + gap = abs(value[0]-value[1])
143 + for threshold in thresholds:
144 + if(gap < threshold):
145 + img = cv2.imread(path[idx])
146 + filename = path[idx].split('/')[-1]
147 + cv2.imwrite(f'threshold/{threshold}/pred_{indice[0]}_{indice[1]}_{filename}', img)
148 +
149 +if __name__ == '__main__':
150 + args = get_args_from_yaml('configs/All_config.yml')
151 + args['config'] = 'All'
152 + args['id'] = 'threshold'
153 + main(args)
\ No newline at end of file
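
To make the saving rule above concrete: an image is copied into every threshold/<t>/ directory whose t exceeds the gap between its top-2 softmax probabilities, so low-confidence predictions land in many buckets. A tiny worked sketch with made-up logits:

import torch
import torch.nn.functional as F

logits = torch.tensor([2.0, 1.7, 0.3, -0.5, 0.1, -1.0, 0.0, 0.4])  # one image, 8 classes (made up)
prob = F.softmax(logits, dim=0)
top2 = torch.topk(prob, 2)
gap = abs(top2.values[0].item() - top2.values[1].item())
thresholds = [.05, .1, .15, .2, .25, .3, .35, .4, .45, .5]
print(gap, [t for t in thresholds if gap < t])  # folders this image would be written to
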
1 +2020-03-31-19-11-26
2 +use seed 963
3 +use dataset : ../data/Fourth_data/All
4 +{'task': 'All/train_2020-03-31-19-11-26_model=MobilenetV3-ep=3000-block=6', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': '../data/Fourth_data/All', 'val': '../data/Fourth_data/All', 'test': '../data/Fourth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'weight': [2.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0], 'resume': '', 'augment': True, 'size': 224, 'confidence': False}, 'predict': {'batch-size': 256, 'worker': 64, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-31-19-11-26'}
1 +2020-04-03-17-46-05
2 +use seed 635
3 +use dataset : E:/code/detection/data/Fifth_data/All
4 +{'task': 'All/train_2020-04-03-17-46-05_model=MobilenetV3-ep=4000-block=6-class=8', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/All', 'test': '../data/Fifth_data/All'}, 'train': {'epochs': 4000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar', 'augment': True, 'size': 224, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 8}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-46-05'}
5 +Number of model parameters: 462840
6 +=> loading checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar'
7 +=> loaded checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' (epoch 3000)
8 +Epoch: [0][0/26] Time 113.958 (113.958) Loss 0.0051 (0.0051) Prec@1 100.000 (100.000)
1 +2020-04-08-19-38-36
2 +use seed 283
3 +use dataset : E:/code/detection/data/Fifth_data/All
4 +{'task': 'All/train_2020-04-08-19-38-36_model=MobilenetV3-ep=3000-block=6-class=8', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/All', 'test': '../data/Fifth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 224, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 8}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-08-19-38-36'}
5 +Fatal error in main loop
6 +Traceback (most recent call last):
7 + File "E:\code\detection\trainer\train.py", line 104, in main
8 + run_model(args, q)
9 + File "E:\code\detection\trainer\train.py", line 221, in run_model
10 + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader))
11 +TypeError: object of type 'DataLoader' has no len()
12 +[train_2020-04-08-19-38-36] failed
1 +2020-03-31-18-30-33
2 +use seed 626
3 +use dataset : ../data/Fourth_data/Error
4 +{'task': 'Error/train_2020-03-31-18-30-33_model=MobilenetV3-ep=3000-block=4', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [0], 'data': {'train': '../data/Fourth_data/Error', 'val': '../data/Fourth_data/Error', 'test': '../data/Fourth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 2.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-31-18-30-33'}
5 +Number of model parameters: 154706
6 +Fatal error in main loop
7 +Traceback (most recent call last):
8 + File "E:\code\detection\trainer\train.py", line 91, in main
9 + run_model(args, q)
10 + File "E:\code\detection\trainer\train.py", line 264, in run_model
11 + train(train_loader, model, criterion, optimizer, scheduler, epoch, args, q)
12 + File "E:\code\detection\trainer\train.py", line 309, in train
13 + for i, (input, target) in enumerate(train_loader):
14 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 279, in __iter__
15 + return _MultiProcessingDataLoaderIter(self)
16 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 719, in __init__
17 + w.start()
18 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 105, in start
19 + self._popen = self._Popen(self)
20 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen
21 + return _default_context.get_context().Process._Popen(process_obj)
22 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen
23 + return Popen(process_obj)
24 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
25 + reduction.dump(process_obj, to_child)
26 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump
27 + ForkingPickler(file, protocol).dump(obj)
28 +BrokenPipeError: [Errno 32] Broken pipe
29 +[train_2020-03-31-18-30-33] failed
1 +2020-04-01-17-53-24
2 +use seed 420
3 +use dataset : ../data/Fifth_data/Error
4 +{'task': 'Error/train_2020-04-01-17-53-24_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-17-53-24'}
5 +Fatal error in main loop
6 +Traceback (most recent call last):
7 + File "E:\code\detection\trainer\train.py", line 91, in main
8 + run_model(args, q)
9 + File "E:\code\detection\trainer\train.py", line 125, in run_model
10 + mean, std = get_params(args['data']['train'], resize_size)
11 + File "E:\code\detection\trainer\get_mean_std.py", line 31, in get_params
12 + my_dataset = MyDataset(path, my_transform)
13 + File "E:\code\detection\trainer\get_mean_std.py", line 16, in __init__
14 + super(MyDataset, self).__init__(root, trainsform)
15 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torchvision\datasets\folder.py", line 209, in __init__
16 + target_transform=target_transform)
17 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torchvision\datasets\folder.py", line 83, in __init__
18 + classes, class_to_idx = self._find_classes(root)
19 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torchvision\datasets\folder.py", line 116, in _find_classes
20 + classes = [d.name for d in os.scandir(dir) if d.is_dir()]
21 +FileNotFoundError: [WinError 3] The system cannot find the path specified: '../data/Fifth_data/Error'
22 +[train_2020-04-01-17-53-24] failed
1 +2020-04-01-18-16-36
2 +use seed 95
3 +use dataset : ../data/Fifth_data/Error
4 +{'task': 'Error/train_2020-04-01-18-16-36_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-18-16-36'}
5 +Number of model parameters: 154706
6 +Epoch: [0][0/7] Time 43.018 (43.018) Loss 0.6986 (0.6986) Prec@1 34.473 (34.473) Precision 0.000 (0.000)
1 +2020-04-01-20-18-29
2 +use seed 997
3 +use dataset : ../data/Fifth_data/Error
4 +{'task': 'Error/train_2020-04-01-20-18-29_model=MobilenetV3-ep=3000-block=6-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-20-18-29'}
5 +Fatal error in main loop
6 +Traceback (most recent call last):
7 + File "E:\code\detection\trainer\train.py", line 93, in main
8 + run_model(args, q)
9 + File "E:\code\detection\trainer\train.py", line 127, in run_model
10 + mean, std = get_params(args['data']['train'], resize_size)
11 + File "E:\code\detection\trainer\get_mean_std.py", line 43, in get_params
12 + for i, (data, target) in enumerate(loader):
13 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 279, in __iter__
14 + return _MultiProcessingDataLoaderIter(self)
15 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 719, in __init__
16 + w.start()
17 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 105, in start
18 + self._popen = self._Popen(self)
19 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen
20 + return _default_context.get_context().Process._Popen(process_obj)
21 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen
22 + return Popen(process_obj)
23 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
24 + reduction.dump(process_obj, to_child)
25 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump
26 + ForkingPickler(file, protocol).dump(obj)
27 +BrokenPipeError: [Errno 32] Broken pipe
28 +[train_2020-04-01-20-18-29] failed
1 +2020-04-01-21-07-25
2 +use seed 880
3 +use dataset : ../data/Fifth_data/Error
4 +{'task': 'Error/train_2020-04-01-21-07-25_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-21-07-25'}
1 +2020-04-01-22-40-24
2 +use seed 238
3 +use dataset : ../data/Fifth_data/Error
4 +{'task': 'Error/train_2020-04-01-22-40-24_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-22-40-24'}
5 +Number of model parameters: 154706
6 +Epoch: [0][0/7] Time 44.533 (44.533) Loss 0.6956 (0.6956) Prec@1 41.504 (41.504) Precision 0.000 (0.000)
1 +2020-04-01-23-15-24
2 +use seed 666
3 +use dataset : ../data/Fifth_data/Error
4 +{'task': 'Error/train_2020-04-01-23-15-24_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-23-15-24'}
1 +2020-04-03-17-02-42
2 +use seed 185
3 +use dataset : E:/code/detection/data/Fifth_data/All
4 +{'task': 'Error/train_2020-04-03-17-02-42_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': '3000', 'start-epoch': 0, 'batch-size': '256', 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': '256', 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': '0.001', 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-02-42'}
5 +Fatal error in main loop
6 +Traceback (most recent call last):
7 + File "E:\code\detection\trainer\train.py", line 102, in main
8 + run_model(args, q)
9 + File "E:\code\detection\trainer\train.py", line 195, in run_model
10 + **kwargs)
11 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 219, in __init__
12 + batch_sampler = BatchSampler(sampler, batch_size, drop_last)
13 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\sampler.py", line 190, in __init__
14 + "but got batch_size={}".format(batch_size))
15 +ValueError: batch_size should be a positive integer value, but got batch_size=256
16 +[train_2020-04-03-17-02-42] failed
1 +2020-04-03-17-04-30
2 +use seed 54
3 +use dataset : E:/code/detection/data/Fifth_data/All
4 +{'task': 'Error/train_2020-04-03-17-04-30_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': '3000', 'start-epoch': 0, 'batch-size': '256', 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': '256', 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': '0.001', 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-04-30'}
5 +Fatal error in main loop
6 +Traceback (most recent call last):
7 + File "E:\code\detection\trainer\train.py", line 103, in main
8 + run_model(args, q)
9 + File "E:\code\detection\trainer\train.py", line 196, in run_model
10 + **kwargs)
11 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 219, in __init__
12 + batch_sampler = BatchSampler(sampler, batch_size, drop_last)
13 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\sampler.py", line 190, in __init__
14 + "but got batch_size={}".format(batch_size))
15 +ValueError: batch_size should be a positive integer value, but got batch_size=256
16 +[train_2020-04-03-17-04-30] failed
1 +2020-04-03-17-07-00
2 +use seed 809
3 +use dataset : E:/code/detection/data/Fifth_data/All
4 +{'task': 'Error/train_2020-04-03-17-07-00_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': '3000', 'start-epoch': 0, 'batch-size': '256', 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': '256', 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': '0.001', 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-07-00'}
5 +Fatal error in main loop
6 +Traceback (most recent call last):
7 + File "E:\code\detection\trainer\train.py", line 103, in main
8 + run_model(args, q)
9 + File "E:\code\detection\trainer\train.py", line 196, in run_model
10 + **kwargs)
11 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 219, in __init__
12 + batch_sampler = BatchSampler(sampler, batch_size, drop_last)
13 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\sampler.py", line 190, in __init__
14 + "but got batch_size={}".format(batch_size))
15 +ValueError: batch_size should be a positive integer value, but got batch_size=256
16 +[train_2020-04-03-17-07-00] failed
1 +2020-04-03-17-08-43
2 +use seed 420
3 +use dataset : E:/code/detection/data/Fifth_data/All
4 +{'task': 'Error/train_2020-04-03-17-08-43_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-08-43'}
5 +Number of model parameters: 154706
6 +Fatal error in main loop
7 +Traceback (most recent call last):
8 + File "E:\code\detection\trainer\train.py", line 102, in main
9 + run_model(args, q)
10 + File "E:\code\detection\trainer\train.py", line 276, in run_model
11 + train(train_loader, model, criterion, optimizer, scheduler, epoch, args, q)
12 + File "E:\code\detection\trainer\train.py", line 327, in train
13 + loss = criterion(output, target)
14 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\module.py", line 532, in __call__
15 + result = self.forward(*input, **kwargs)
16 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\loss.py", line 916, in forward
17 + ignore_index=self.ignore_index, reduction=self.reduction)
18 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\functional.py", line 2021, in cross_entropy
19 + return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
20 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\functional.py", line 1838, in nll_loss
21 + ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
22 +IndexError: Target 5 is out of bounds.
23 +[train_2020-04-03-17-08-43] failed
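This IndexError is a model/data mismatch rather than a code bug: the config builds a 2-class head ('class': 2) while 'data.train' points at the multi-class All folder, so a target index of 5 falls outside the output layer; the next run switches back to the two-class Error folder. A small hedged guard, assuming the training set is built with torchvision's ImageFolder and args is the logged config dict:

    import torchvision.datasets as datasets

    train_set = datasets.ImageFolder(args['data']['train'])
    n_classes = len(train_set.classes)
    # Fail fast instead of crashing inside nll_loss when a target index exceeds the head size.
    assert args['model']['class'] == n_classes, (
        f"model head has {args['model']['class']} outputs but the dataset has {n_classes} classes")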
24 +2020-04-03-17-09-59
25 +use seed 420
26 +use dataset : E:/code/detection/data/Fifth_data/Error
27 +{'task': 'Error/train_2020-04-03-17-09-59_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-09-59'}
28 +Number of model parameters: 154706
29 +Epoch: [0][0/26] Time 32.853 (32.853) Loss 0.6907 (0.6907) Prec@1 56.641 (56.641) Precision 0.000 (0.000)
1 +2020-04-03-17-09-59
2 +use seed 420
3 +use dataset : E:/code/detection/data/Fifth_data/Error
4 +{'task': 'Error/train_2020-04-03-17-09-59_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-09-59'}
5 +Number of model parameters: 154706
6 +Epoch: [0][0/26] Time 32.853 (32.853) Loss 0.6907 (0.6907) Prec@1 56.641 (56.641) Precision 0.000 (0.000)
1 +2020-04-03-17-22-28
2 +use seed 845
3 +use dataset : E:/code/detection/data/Fifth_data/Error
4 +{'task': 'Error/train_2020-04-03-17-22-28_model=MobilenetV3-ep=1000-block=5-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 1000, 'start-epoch': 0, 'batch-size': 128, 'worker': 16, 'resume': '', 'augment': True, 'size': 128, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 128, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.01, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 5, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-22-28'}
5 +Number of model parameters: 400114
1 +2020-04-08-19-37-53
2 +use seed 41
3 +use dataset : E:/code/detection/data/Fifth_data/All
4 +{'task': 'Error/train_2020-04-08-19-37-53_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-08-19-37-53'}
1 +2020-04-01-19-51-23
2 +use seed 355
3 +use dataset : ../data/Fifth_data/ErrorType
4 +{'task': 'ErrorType/train_2020-04-01-19-51-23_model=MobilenetV3-ep=3000-block=6-class=7', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/ErrorType', 'val': '../data/Fifth_data/ErrorType', 'test': '../data/Fifth_data/ErrorType'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 4048, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 4048, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-19-51-23'}
5 +Number of model parameters: 461559
1 +2020-04-01-22-42-16
2 +use seed 805
3 +use dataset : ../data/Fifth_data/ErrorType
4 +{'task': 'Type/train_2020-04-01-22-42-16_model=MobilenetV3-ep=3000-block=6-class=7', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/ErrorType', 'val': '../data/Fifth_data/ErrorType', 'test': '../data/Fifth_data/ErrorType'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 4048, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 4048, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-22-42-16'}
5 +Number of model parameters: 461559
1 +Number of model parameters: 461559
2 +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
3 +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
4 + * Prec@1 95.495
5 + * Prec@1 95.495
6 +Best accuracy: 95.49549572460644
7 +[validate_2020-03-26-17-26-14] done
8 +[validate_2020-03-26-17-26-14] done
9 +/home/yh9468/detection/data/Fourth_data/demo Test dir submitted
10 +start test using path : /home/yh9468/detection/data/Fourth_data/demo
11 +Test start
12 +loading checkpoint...
13 +checkpoint already loaded!
14 +start test
15 +data path directory is /home/yh9468/detection/data/Fourth_data/demo
16 +finish test
17 +/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp test file submitted
18 +start test using path : ('/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp', 'All Files(*)')
19 +Test start
20 +loading checkpoint...
21 +checkpoint already loaded!
22 +start test
23 +finish test
1 +Number of model parameters: 461559
2 +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
3 +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
4 + * Prec@1 95.495
5 + * Prec@1 95.495
6 +Best accuracy: 95.49549572460644
7 +[validate_2020-03-26-17-48-44] done
8 +[validate_2020-03-26-17-48-44] done
9 +set error
1 +using default checkpoint
2 +Number of model parameters: 461559
3 +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
4 +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
5 +Fatal error in main loop
6 +Traceback (most recent call last):
7 + File "E:\code\detection\trainer\test.py", line 270, in main
8 + run_model(args, q)
9 + File "E:\code\detection\trainer\test.py", line 360, in run_model
10 + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
11 + File "E:\code\detection\trainer\test.py", line 394, in validate
12 + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=True)
13 + File "E:\code\detection\trainer\test.py", line 455, in save_error_case
14 + os.mkdir(f"eval_results/{args['task']}")
15 +FileNotFoundError: [WinError 3] The system cannot find the path specified: 'eval_results/All'
16 +[validate_2020-03-31-18-34-56] failed
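The WinError 3 above is raised because os.mkdir cannot create 'eval_results/All' while the parent 'eval_results' folder does not exist yet. os.makedirs builds the whole chain and, with exist_ok=True, is safe to call repeatedly; a one-line sketch using the path format from the traceback (args is the task config dict):

    import os

    # Creates 'eval_results' and the per-task subfolder in one call; no error if they already exist.
    os.makedirs(f"eval_results/{args['task']}", exist_ok=True)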
1 +Number of model parameters: 461559
2 +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
3 +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
4 +Test: [0/2] Time 118.781 (118.781) Loss 0.3068 (0.3068) Prec@1 95.703 (95.703)
5 +Test: [1/2] Time 3.661 (61.221) Loss 0.4321 (0.3358) Prec@1 94.805 (95.495)
6 + * Prec@1 95.495
7 + * Prec@1 95.495
8 +Best accuracy: 95.49549572460644
9 +[validate_2020-03-31-19-08-47] done
10 +[validate_2020-03-31-19-08-47] done
11 +train start
12 +2020-03-31-19-11-26
13 +use seed 963
14 +use dataset : ../data/Fourth_data/All
15 +{'task': 'All/train_2020-03-31-19-11-26_model=MobilenetV3-ep=3000-block=6', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': '../data/Fourth_data/All', 'val': '../data/Fourth_data/All', 'test': '../data/Fourth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'weight': [2.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0], 'resume': '', 'augment': True, 'size': 224, 'confidence': False}, 'predict': {'batch-size': 256, 'worker': 64, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-31-19-11-26'}
1 +using default checkpoint
2 +Number of model parameters: 462840
3 +=> loading checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
4 +=> loaded checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1617)
5 +Test: [0/3] Time 31.288 (31.288) Loss 0.2660 (0.2660) Prec@1 95.703 (95.703)
6 +Test: [1/3] Time 7.587 (19.437) Loss 0.3209 (0.2934) Prec@1 95.312 (95.508)
7 +Test: [2/3] Time 6.625 (15.167) Loss 0.1835 (0.2602) Prec@1 96.396 (95.777)
8 + * Prec@1 95.777
9 + * Prec@1 95.777
10 +Best accuracy: 95.77656669512757
11 +[validate_2020-04-01-23-00-04] done
12 +[validate_2020-04-01-23-00-04] done
13 +set error
14 +Please provide input data to run the test.
15 +Please provide input data to run the test.
1 +using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
2 +Number of model parameters: 462840
3 +=> loading checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar'
4 +=> loaded checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' (epoch 3000)
5 +Test: [0/3] Time 32.591 (32.591) Loss 0.1575 (0.1575) Prec@1 94.531 (94.531)
6 +Test: [1/3] Time 8.179 (20.385) Loss 0.2475 (0.2025) Prec@1 93.750 (94.141)
7 +Test: [2/3] Time 7.374 (16.048) Loss 0.4568 (0.2794) Prec@1 94.595 (94.278)
8 + * Prec@1 94.278
9 + * Prec@1 94.278
10 +Best accuracy: 96.04904700754774
11 +[validate_2020-04-03-17-39-50] done
12 +[validate_2020-04-03-17-39-50] done
13 +E:/code/detection/data/Fifth_data/All/Empty/1-5.bmp test file submitted
14 +Test start
15 +start test using path : E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
16 +using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
17 +loading checkpoint...
18 +checkpoint already loaded!
19 +start test
20 +single_file_test() missing 1 required positional argument: 'q'
21 +An error occurred during execution. Please check the log for details.
1 +Number of model parameters: 154706
2 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
3 +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
4 +Fatal error in main loop
5 +Traceback (most recent call last):
6 + File "/home/yh9468/detection/trainer/test.py", line 181, in main
7 + run_model(args, q)
8 + File "/home/yh9468/detection/trainer/test.py", line 263, in run_model
9 + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
10 + File "/home/yh9468/detection/trainer/test.py", line 296, in validate
11 + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=True)
12 + File "/home/yh9468/detection/trainer/test.py", line 373, in save_error_case
13 + cv2.imwrite(f"eval_results/{args['task']}/correct_case/idx_{correct_case_idx}_label_{class_arr[target[idx]]}_real.bmp" ,img)
14 +NameError: name 'correct_case_idx' is not defined
15 +[validate_2020-03-26-16-50-29] failed
1 +Number of model parameters: 154706
2 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
3 +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
4 +Fatal error in main loop
5 +Traceback (most recent call last):
6 + File "/home/yh9468/detection/trainer/test.py", line 184, in main
7 + run_model(args, q)
8 + File "/home/yh9468/detection/trainer/test.py", line 266, in run_model
9 + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
10 + File "/home/yh9468/detection/trainer/test.py", line 299, in validate
11 + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=True)
12 + File "/home/yh9468/detection/trainer/test.py", line 376, in save_error_case
13 + cv2.imwrite(f"eval_results/{args['task']}/correct_case/idx_{correct_case_idx}_label_{class_arr[target[idx]]}_real.bmp" ,img)
14 +NameError: name 'correct_case_idx' is not defined
15 +[validate_2020-03-26-16-55-37] failed
1 +Number of model parameters: 154706
2 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
3 +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
4 + * Prec@1 98.193
5 + * Prec@1 98.193
6 +Best accuracy: 98.19277163585984
7 +[validate_2020-03-26-16-57-52] done
8 +[validate_2020-03-26-16-57-52] done
9 +start test using path : ../data/Fourth_data/demo
10 +Test start
11 +loading checkpoint...
12 +checkpoint already loaded!
1 +Number of model parameters: 154706
2 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
3 +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
4 + * Prec@1 98.193
5 + * Prec@1 98.193
6 +Best accuracy: 98.1927713600986
7 +[validate_2020-03-26-17-02-22] done
8 +[validate_2020-03-26-17-02-22] done
9 +start test using path : ../data/Fourth_data/demo
10 +Test start
11 +loading checkpoint...
12 +checkpoint already loaded!
13 +start test
14 +data path directory is ../data/Fourth_data/demo
15 +finish test
16 +start test using path : ../data/Fourth_data/demo
17 +Test start
18 +loading checkpoint...
19 +checkpoint already loaded!
20 +start test
21 +data path directory is ../data/Fourth_data/demo
22 +finish test
1 +Number of model parameters: 154706
2 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
3 +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
4 + * Prec@1 98.193
5 + * Prec@1 98.193
6 +Best accuracy: 98.1927713600986
7 +[validate_2020-03-26-17-26-07] done
8 +[validate_2020-03-26-17-26-07] done
9 +set Type
10 +Number of model parameters: 461559
11 +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
12 +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
13 + * Prec@1 95.495
14 + * Prec@1 95.495
15 +Best accuracy: 95.49549572460644
16 +[validate_2020-03-26-17-26-14] done
17 +[validate_2020-03-26-17-26-14] done
18 +/home/yh9468/detection/data/Fourth_data/demo Test dir submitted
19 +start test using path : /home/yh9468/detection/data/Fourth_data/demo
20 +Test start
21 +loading checkpoint...
22 +checkpoint already loaded!
23 +start test
24 +data path directory is /home/yh9468/detection/data/Fourth_data/demo
25 +finish test
26 +/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp test file submitted
27 +start test using path : ('/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp', 'All Files(*)')
28 +Test start
29 +loading checkpoint...
30 +checkpoint already loaded!
31 +start test
32 +finish test
1 +Number of model parameters: 154706
2 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
3 +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
4 + * Prec@1 98.193
5 + * Prec@1 98.193
6 +Best accuracy: 98.1927713600986
7 +[validate_2020-03-27-11-52-28] done
8 +[validate_2020-03-27-11-52-28] done
9 +start test using path : ../data/Fourth_data/demo
10 +Test start
11 +loading checkpoint...
12 +checkpoint already loaded!
13 +start test
14 +data path directory is ../data/Fourth_data/demo
15 +Inference time 21 images : 0.37514
16 +finish test
17 +set Type
18 +start test using path : ../data/Fourth_data/demo
19 +Test start
20 +loading checkpoint...
21 +checkpoint already loaded!
22 +start test
23 +data path directory is ../data/Fourth_data/demo
24 +Inference time 21 images : 0.7917
25 +finish test
26 +/home/yh9468/detection/data/Fourth_data/demo/demoset/1-1.bmp test file submitted
27 +start test using path : ('/home/yh9468/detection/data/Fourth_data/demo/demoset/1-1.bmp', 'All Files(*)')
28 +Test start
29 +loading checkpoint...
30 +checkpoint already loaded!
31 +start test
32 +Inference time 1 image : 0.03704
33 +finish test
1 +using user's checkpoint ('E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar', 'All Files(*)')
2 +Number of model parameters: 154706
3 +=> loading checkpoint '('E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar', 'All Files(*)')'
4 +Fatal error in main loop
5 +Traceback (most recent call last):
6 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 289, in _check_seekable
7 + f.seek(f.tell())
8 +AttributeError: 'tuple' object has no attribute 'seek'
9 +
10 +During handling of the above exception, another exception occurred:
11 +
12 +Traceback (most recent call last):
13 + File "E:\code\detection\trainer\test.py", line 256, in main
14 + run_model(args, q)
15 + File "E:\code\detection\trainer\test.py", line 328, in run_model
16 + checkpoint = torch.load(args['checkpoint'])
17 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 525, in load
18 + with _open_file_like(f, 'rb') as opened_file:
19 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 217, in _open_file_like
20 + return _open_buffer_reader(name_or_buffer)
21 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 202, in __init__
22 + _check_seekable(buffer)
23 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 292, in _check_seekable
24 + raise_err_msg(["seek", "tell"], e)
25 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 285, in raise_err_msg
26 + raise type(e)(msg)
27 +AttributeError: 'tuple' object has no attribute 'seek'. You can only torch.load from a file that is seekable. Please pre-load the data into a buffer like io.BytesIO and try to load from it instead.
28 +[validate_2020-03-31-14-53-49] failed
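Here args['checkpoint'] is the whole ('path', 'All Files(*)') tuple that PyQt5's QFileDialog.getOpenFileName returns, and torch.load cannot seek on a tuple; the next run passes only the path string. A sketch of the GUI-side fix (the dialog caption and parent are illustrative; args is the config dict handed to the trainer):

    from PyQt5.QtWidgets import QFileDialog

    # getOpenFileName returns (selected_path, selected_filter); keep only the path.
    path, _ = QFileDialog.getOpenFileName(None, "Select checkpoint", "", "All Files(*)")
    if path:
        args['checkpoint'] = path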
1 +using user's checkpoint E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar
2 +Number of model parameters: 154706
3 +=> loading checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
4 +Fatal error in main loop
5 +Traceback (most recent call last):
6 + File "E:\code\detection\trainer\test.py", line 261, in main
7 + run_model(args, q)
8 + File "E:\code\detection\trainer\test.py", line 333, in run_model
9 + checkpoint = torch.load(args['checkpoint'])
10 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 529, in load
11 + return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
12 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 702, in _legacy_load
13 + result = unpickler.load()
14 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 665, in persistent_load
15 + deserialized_objects[root_key] = restore_location(obj, location)
16 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 156, in default_restore_location
17 + result = fn(storage, location)
18 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 132, in _cuda_deserialize
19 + device = validate_cuda_device(location)
20 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 116, in validate_cuda_device
21 + raise RuntimeError('Attempting to deserialize object on a CUDA '
22 +RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.
23 +[validate_2020-03-31-14-58-23] failed
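This is the usual CUDA-checkpoint-on-a-CPU-only-machine failure, and the message itself names the fix; a short sketch that maps the stored tensors onto whatever device is actually available:

    import torch

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    checkpoint = torch.load(args['checkpoint'], map_location=device)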
1 +using user's checkpoint E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar
2 +Number of model parameters: 154706
3 +=> loading checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
4 +=> loaded checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
5 + * Prec@1 91.867
6 + * Prec@1 91.867
7 +Best accuracy: 95.90643257007264
1 +using user's checkpoint E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar
2 +Number of model parameters: 154706
3 +=> loading checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
4 +=> loaded checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
5 +Test: [0/2] Time 23.714 (23.714) Loss 0.1847 (0.1847) Prec@1 92.969 (92.969)
6 +Test: [1/2] Time 0.464 (12.089) Loss 0.2262 (0.1942) Prec@1 88.158 (91.867)
7 + * Prec@1 91.867
8 + * Prec@1 91.867
9 +Best accuracy: 95.90643257007264
10 +[validate_2020-03-31-15-08-03] done
11 +[validate_2020-03-31-15-08-03] done
1 +using default checkpoint
2 +Number of model parameters: 154706
3 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
4 +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
5 +Fatal error in main loop
6 +Traceback (most recent call last):
7 + File "E:\code\detection\trainer\test.py", line 271, in main
8 + run_model(args, q)
9 + File "E:\code\detection\trainer\test.py", line 359, in run_model
10 + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
11 + File "E:\code\detection\trainer\test.py", line 393, in validate
12 + if args['predict']['save']:
13 +KeyError: 'save'
14 +[validate_2020-04-01-18-17-52] failed
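The KeyError means this particular config has no 'save' entry under predict (later configs in this commit do carry 'save': False). Reading the flag with a default keeps older configs working; a one-line sketch:

    # Treat a missing 'save' flag as False instead of raising KeyError.
    save_enabled = args['predict'].get('save', False)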
1 +Number of model parameters: 154706
2 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
3 +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
4 +Test: [0/1] Time 26.222 (26.222) Loss 0.2174 (0.2174) Prec@1 94.823 (94.823)
5 + * Prec@1 94.823
6 + * Prec@1 94.823
7 +Best accuracy: 98.1927713600986
8 +[validate_2020-04-01-18-45-23] done
9 +[validate_2020-04-01-18-45-23] done
10 +start test using path : default checkpoint
11 +Test start
12 +using default checkpoint
13 +loading checkpoint...
14 +checkpoint already loaded!
15 +start test
16 +data path directory is ../data/Fourth_data/demo
17 +Inference time 120 images : 4.358
18 +finish test
19 +start test using path : default checkpoint
20 +Test start
21 +using default checkpoint
22 +loading checkpoint...
23 +[Errno 2] No such file or directory: 'n'
24 +checkpoint already loaded!
25 +start test
26 +An error occurred during execution. Please check the log for details.
27 +E:/code/detection/data/Fifth_data/All/Flip/1-1.bmp test file submitted
28 +start test using path : default checkpoint
29 +Test start
30 +using default checkpoint
31 +loading checkpoint...
32 +[Errno 2] No such file or directory: 'E'
33 +checkpoint already loaded!
34 +start test
35 +An error occurred during execution. Please check the log for details.
36 +start test using path : default checkpoint
37 +Test start
38 +using default checkpoint
39 +loading checkpoint...
40 +[Errno 2] No such file or directory: 'E'
41 +checkpoint already loaded!
42 +start test
43 +An error occurred during execution. Please check the log for details.
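The repeated "[Errno 2] No such file or directory: 'E'" (and earlier 'n') suggests the submitted path is being indexed or iterated as a plain string, so a single character such as 'E' from 'E:/code/...' ends up being opened as a file name. This is a guess from the logged characters, not something visible in the code shown here; a tiny illustration of the failure mode:

    path = 'E:/code/detection/data/Fifth_data/All/Flip/1-1.bmp'
    path[0]   # -> 'E'; open('E') then raises FileNotFoundError: [Errno 2] No such file or directory: 'E'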
1 +using default checkpoint
2 +Number of model parameters: 154706
3 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
4 +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
5 +Test: [0/1] Time 27.498 (27.498) Loss 0.2174 (0.2174) Prec@1 94.823 (94.823)
6 + * Prec@1 94.823
7 + * Prec@1 94.823
8 +Best accuracy: 98.1927713600986
9 +[validate_2020-04-01-20-18-55] done
10 +[validate_2020-04-01-20-18-55] done
1 +using default checkpoint
2 +Number of model parameters: 400114
3 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
4 +Fatal error in main loop
5 +Traceback (most recent call last):
6 + File "E:\code\detection\trainer\test.py", line 274, in main
7 + run_model(args, q)
8 + File "E:\code\detection\trainer\test.py", line 351, in run_model
9 + model.load_state_dict(checkpoint['state_dict'])
10 + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\module.py", line 830, in load_state_dict
11 + self.__class__.__name__, "\n\t".join(error_msgs)))
12 +RuntimeError: Error(s) in loading state_dict for DataParallel:
13 + Missing key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.6.0.weight", "module.features.6.1.weight", "module.features.6.1.bias", "module.features.6.1.running_mean", "module.features.6.1.running_var", "module.features.8.weight", "module.features.8.bias".
14 + Unexpected key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.5.1.num_batches_tracked", "module.features.7.weight", "module.features.7.bias".
15 +[validate_2020-04-01-20-21-14] failed
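The missing/unexpected keys come from building the network at a different depth than the checkpoint: 400,114 parameters matches the blocks=5 model elsewhere in these logs, while the checkpoint directory name says block=4. A hedged sketch of loading with a matching architecture (mobilenetv3's n_class/blocknum arguments are taken from the training scripts in this commit):

    import torch
    from model import mobilenetv3

    # Rebuild the model exactly as the checkpoint was trained: Error task, 2 classes, 4 blocks.
    model = mobilenetv3(n_class=2, blocknum=4)
    model = torch.nn.DataParallel(model)
    state = torch.load('output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar')
    model.load_state_dict(state['state_dict'])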
1 +Number of model parameters: 400114
2 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
3 +printlog() missing 2 required positional arguments: 'logger' and 'q'
4 +An error occurred during execution. Please check the log for details.
1 +using default checkpoint
2 +Number of model parameters: 154706
3 +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
4 +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
5 +Test: [0/1] Time 26.242 (26.242) Loss 0.2174 (0.2174) Prec@1 94.823 (94.823)
6 + * Prec@1 94.823
7 + * Prec@1 94.823
8 +Best accuracy: 98.1927713600986
9 +[validate_2020-04-01-22-45-31] done
10 +[validate_2020-04-01-22-45-31] done
11 +set Type
12 +start test using path : ../data/Fourth_data/demo
13 +val start
14 +using default checkpoint
15 +Number of model parameters: 461559
16 +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
17 +printlog() missing 2 required positional arguments: 'logger' and 'q'
18 +An error occurred during execution. Please check the log for details.
1 +Number of model parameters: 460278
2 +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
3 +=> loaded checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1865)
4 +Test: [0/1] Time 29.993 (29.993) Loss 0.4449 (0.4449) Prec@1 92.952 (92.952)
5 + * Prec@1 92.952
6 + * Prec@1 92.952
7 +Best accuracy: 92.9515380859375
8 +[validate_2020-03-31-16-28-46] done
9 +[validate_2020-03-31-16-28-46] done
10 +set All processing
11 +start test using path : ../data/Fourth_data/demo
12 +Test start
13 +[Errno 2] No such file or directory: 'configs/overall_config.yaml'
14 +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
1 +using default checkpoint
2 +Number of model parameters: 460278
3 +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
4 +=> loaded checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1865)
5 +Fatal error in main loop
6 +Traceback (most recent call last):
7 + File "E:\code\detection\trainer\test.py", line 270, in main
8 + run_model(args, q)
9 + File "E:\code\detection\trainer\test.py", line 358, in run_model
10 + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
11 + File "E:\code\detection\trainer\test.py", line 392, in validate
12 + if args['predict']['save']:
13 +KeyError: 'save'
14 +[validate_2020-03-31-18-52-03] failed
1 +using default checkpoint
2 +Number of model parameters: 461559
3 +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
4 +printlog() missing 2 required positional arguments: 'logger' and 'q'
5 +An error occurred during execution. Please check the log for details.
1 +using default checkpoint
2 +Number of model parameters: 461559
3 +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
4 +An error occurred during execution. Please check the log for details.
5 +An error occurred during execution. Please check the log for details.
6 +Error(s) in loading state_dict for DataParallel:
7 + size mismatch for module.classifier.1.weight: copying a param with shape torch.Size([6, 1280]) from checkpoint, the shape in current model is torch.Size([7, 1280]).
8 + size mismatch for module.classifier.1.bias: copying a param with shape torch.Size([6]) from checkpoint, the shape in current model is torch.Size([7]).
9 +start test using path : ../data/Fourth_data/demo
10 +val start
11 +using default checkpoint
12 +Number of model parameters: 461559
13 +=> loading checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
14 +An error occurred during execution. Please check the log for details.
15 +An error occurred during execution. Please check the log for details.
16 +Error(s) in loading state_dict for DataParallel:
17 + Missing key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.6.conv.0.weight", "module.features.6.conv.1.weight", "module.features.6.conv.1.bias", "module.features.6.conv.1.running_mean", "module.features.6.conv.1.running_var", "module.features.6.conv.3.weight", "module.features.6.conv.4.weight", "module.features.6.conv.4.bias", "module.features.6.conv.4.running_mean", "module.features.6.conv.4.running_var", "module.features.6.conv.5.fc.0.weight", "module.features.6.conv.5.fc.2.weight", "module.features.6.conv.7.weight", "module.features.6.conv.8.weight", "module.features.6.conv.8.bias", "module.features.6.conv.8.running_mean", "module.features.6.conv.8.running_var", "module.features.7.0.weight", "module.features.7.1.weight", "module.features.7.1.bias", "module.features.7.1.running_mean", "module.features.7.1.running_var", "module.features.9.weight", "module.features.9.bias".
18 + Unexpected key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.5.1.num_batches_tracked", "module.features.7.weight", "module.features.7.bias".
1 +Number of model parameters: 461559
2 +=> loading checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
3 +An error occurred during execution. Please check the log for details.
4 +An error occurred during execution. Please check the log for details.
5 +Error(s) in loading state_dict for DataParallel:
6 + Missing key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.6.conv.0.weight", "module.features.6.conv.1.weight", "module.features.6.conv.1.bias", "module.features.6.conv.1.running_mean", "module.features.6.conv.1.running_var", "module.features.6.conv.3.weight", "module.features.6.conv.4.weight", "module.features.6.conv.4.bias", "module.features.6.conv.4.running_mean", "module.features.6.conv.4.running_var", "module.features.6.conv.5.fc.0.weight", "module.features.6.conv.5.fc.2.weight", "module.features.6.conv.7.weight", "module.features.6.conv.8.weight", "module.features.6.conv.8.bias", "module.features.6.conv.8.running_mean", "module.features.6.conv.8.running_var", "module.features.7.0.weight", "module.features.7.1.weight", "module.features.7.1.bias", "module.features.7.1.running_mean", "module.features.7.1.running_var", "module.features.9.weight", "module.features.9.bias".
7 + Unexpected key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.5.1.num_batches_tracked", "module.features.7.weight", "module.features.7.bias".
1 +using default checkpoint
2 +Number of model parameters: 161111
3 +=> loading checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
4 +=> loaded checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1884)
5 +Test: [0/1] Time 25.026 (25.026) Loss 0.2817 (0.2817) Prec@1 95.918 (95.918)
6 + * Prec@1 95.918
7 + * Prec@1 95.918
8 +Best accuracy: 95.91837310791016
9 +[validate_2020-04-01-22-59-05] done
10 +[validate_2020-04-01-22-59-05] done
11 +set All processing
12 +start test using path : ../data/Fourth_data/demo
13 +val start
14 +using default checkpoint
15 +Number of model parameters: 462840
16 +=> loading checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
17 +=> loaded checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1617)
18 +Test: [0/3] Time 31.288 (31.288) Loss 0.2660 (0.2660) Prec@1 95.703 (95.703)
19 +Test: [1/3] Time 7.587 (19.437) Loss 0.3209 (0.2934) Prec@1 95.312 (95.508)
20 +Test: [2/3] Time 6.625 (15.167) Loss 0.1835 (0.2602) Prec@1 96.396 (95.777)
21 + * Prec@1 95.777
22 + * Prec@1 95.777
23 +Best accuracy: 95.77656669512757
24 +[validate_2020-04-01-23-00-04] done
25 +[validate_2020-04-01-23-00-04] done
26 +set error
27 +Please provide input data to run the test.
28 +Please provide input data to run the test.
1 +using user's checkpoint E:/code/detection/trainer/output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar
2 +Number of model parameters: 161111
3 +=> loading checkpoint 'E:/code/detection/trainer/output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
4 +=> loaded checkpoint 'E:/code/detection/trainer/output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
5 +Test: [0/2] Time 26.118 (26.118) Loss 0.2720 (0.2720) Prec@1 93.359 (93.359)
6 +Test: [1/2] Time 0.848 (13.483) Loss 0.4744 (0.3686) Prec@1 91.453 (92.449)
7 + * Prec@1 92.449
8 + * Prec@1 92.449
9 +Best accuracy: 95.91836762720224
10 +[validate_2020-04-03-17-24-24] done
11 +[validate_2020-04-03-17-24-24] done
12 +set All processing
13 +E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar checkpoint file submitted
14 +E:/code/detection/data/Fifth_data/All Test dir submitted
15 +val start
16 +using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
17 +Number of model parameters: 462840
18 +=> loading checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar'
19 +=> loaded checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' (epoch 3000)
20 +Test: [0/3] Time 32.591 (32.591) Loss 0.1575 (0.1575) Prec@1 94.531 (94.531)
21 +Test: [1/3] Time 8.179 (20.385) Loss 0.2475 (0.2025) Prec@1 93.750 (94.141)
22 +Test: [2/3] Time 7.374 (16.048) Loss 0.4568 (0.2794) Prec@1 94.595 (94.278)
23 + * Prec@1 94.278
24 + * Prec@1 94.278
25 +Best accuracy: 96.04904700754774
26 +[validate_2020-04-03-17-39-50] done
27 +[validate_2020-04-03-17-39-50] done
28 +E:/code/detection/data/Fifth_data/All/Empty/1-5.bmp test file submitted
29 +Test start
30 +start test using path : E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
31 +using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
32 +loading checkpoint...
33 +checkpoint already loaded!
34 +start test
35 +single_file_test() missing 1 required positional argument: 'q'
36 +An error occurred during execution. Please check the log for details.
1 +import argparse
2 +import random
3 +import os
4 +import cv2
5 +import logging
6 +import datetime
7 +
8 +import torch
9 +import torch.nn as nn
10 +import torchvision.datasets as datasets
11 +import torchvision.transforms as transforms
12 +from torchvision.utils import save_image
13 +
14 +from model import mobilenetv3
15 +from utils import get_args_from_yaml, MyImageFolder
16 +from get_mean_std import get_params
17 +
18 +## This script runs the full inference pipeline (error detection followed by error-type classification).
19 +
20 +# make Logger
21 +logger = logging.getLogger(os.path.dirname(__name__))
22 +logger.setLevel(logging.INFO)
23 +
24 +# make Logger stream
25 +streamHandler = logging.StreamHandler()
26 +logger.addHandler(streamHandler)
27 +
28 +if not os.path.exists('eval_results/main'):
29 +    os.makedirs('eval_results/main')  # makedirs also creates the missing 'eval_results' parent folder
30 +
31 +if not os.path.exists('eval_results/main/Normal'):
32 + os.mkdir('eval_results/main/Normal')
33 +
34 +if not os.path.exists('eval_results/main/Crack'):
35 + os.mkdir('eval_results/main/Crack')
36 +
37 +if not os.path.exists('eval_results/main/Empty'):
38 + os.mkdir('eval_results/main/Empty')
39 +
40 +if not os.path.exists('eval_results/main/Flip'):
41 + os.mkdir('eval_results/main/Flip')
42 +
43 +if not os.path.exists('eval_results/main/Pollute'):
44 + os.mkdir('eval_results/main/Pollute')
45 +
46 +if not os.path.exists('eval_results/main/Double'):
47 + os.mkdir('eval_results/main/Double')
48 +
49 +if not os.path.exists('eval_results/main/Leave'):
50 + os.mkdir('eval_results/main/Leave')
51 +
52 +if not os.path.exists('eval_results/main/Scratch'):
53 + os.mkdir('eval_results/main/Scratch')
54 +
55 +
56 +def main(Error_args, Error_Type_args):
57 +    logdir = "logs/main/"
58 +    if not os.path.exists(logdir):
59 +        os.makedirs(logdir)
60 +    fileHander = logging.FileHandler(logdir + f"{datetime.datetime.now().strftime('%Y%m%d-%H-%M-%S')}_log.log")  # ':' is not a legal character in Windows file names
61 + logger.addHandler(fileHander)
62 +
63 + run(Error_args, Error_Type_args)
64 +
65 +def run(Error_args, Error_Type_args):
66 + Error_args['checkpoint'] = "output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar"
67 + Error_Type_args['checkpoint'] = "output/ErrorType/2798_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar"
68 +
69 + Error_model = mobilenetv3(n_class= Error_args['model']['class'], blocknum=Error_args['model']['blocks'])
70 + Error_Type_model = mobilenetv3(n_class=Error_Type_args['model']['class'], blocknum=Error_Type_args['model']['blocks'])
71 +
72 + gpus = Error_args['gpu']
73 + resize_size = Error_args['train']['size']
74 +
75 + torch.cuda.set_device(gpus[0])
76 + with torch.cuda.device(gpus[0]):
77 + Error_model = Error_model.cuda()
78 + Error_Type_model = Error_Type_model.cuda()
79 +
80 + Error_model = torch.nn.DataParallel(Error_model, device_ids=gpus, output_device=gpus[0])
81 + Error_Type_model = torch.nn.DataParallel(Error_Type_model, device_ids=gpus, output_device=gpus[0])
82 +
83 + Error_checkpoint = torch.load(Error_args['checkpoint'])
84 + Error_Type_checkpoint = torch.load(Error_Type_args['checkpoint'])
85 +
86 + Error_model.load_state_dict(Error_checkpoint['state_dict'])
87 + Error_Type_model.load_state_dict(Error_Type_checkpoint['state_dict'])
88 +
89 + mean, std = get_params(Error_args['data']['test'], resize_size)
90 + normalize = transforms.Normalize(mean=[mean[0].item()],
91 + std=[std[0].item()])
92 +
93 + transform = transforms.Compose([
94 + transforms.Resize((resize_size, resize_size)),
95 + transforms.Grayscale(),
96 + transforms.ToTensor(),
97 + normalize
98 + ])
99 +
100 + dataset = MyImageFolder(Error_args['data']['test'], transform)
101 +
102 + print(len(dataset))
103 +
104 + loader = torch.utils.data.DataLoader(
105 + dataset, batch_size=Error_args['predict']['batch-size'], shuffle=False,
106 + num_workers=Error_args['predict']['worker'], pin_memory=True
107 + )
108 +
109 + for data in loader:
110 + (input, _), (path, _) = data
111 + input= input.cuda()
112 +
113 + output = Error_model(input)
114 + _, output = output.topk(1 ,1 ,True,True)
115 +
116 +        error_cases = torch.ones((1, 1, resize_size, resize_size)).cuda()  # dummy first row; use the configured input size rather than a hard-coded 64
117 + new_paths = []
118 +
119 + error = 0
120 + normal = 0
121 + for idx in range(input.shape[0]):
122 + # if Error Case
123 +
124 + if output[idx] == 0:
125 + error_cases = torch.cat((error_cases, input[idx:idx+1]), dim=0)
126 + new_paths.append(path[idx])
127 + error = error +1
128 + # Normal Case
129 + else:
130 + img = cv2.imread(path[idx])
131 + cv2.imwrite(f"eval_results/main/Normal/{path[idx].split('/')[-1]}", img)
132 + normal = normal+1
133 +
134 + print(f"error path : {len(new_paths)}")
135 + print(f"error : {error}")
136 + print(f"normal : {normal}")
137 +
138 + error_cases = error_cases[1:]
139 + print(error_cases.shape[0])
140 +
141 + output = Error_Type_model(error_cases)
142 + _, output = output.topk(1 ,1 ,True,True)
143 +
144 + for idx in range(error_cases.shape[0]):
145 + # Crack
146 + if output[idx] == 0:
147 + img = cv2.imread(new_paths[idx])
148 + cv2.imwrite(f"eval_results/main/Crack/{new_paths[idx].split('/')[-1]}", img)
149 +
150 + # Double
151 + elif output[idx] == 1:
152 + img = cv2.imread(new_paths[idx])
153 + cv2.imwrite(f"eval_results/main/Double/{new_paths[idx].split('/')[-1]}", img)
154 +
155 + # Empty
156 + elif output[idx] == 2:
157 + img = cv2.imread(new_paths[idx])
158 + cv2.imwrite(f"eval_results/main/Empty/{new_paths[idx].split('/')[-1]}", img)
159 +
160 + # Flip
161 + elif output[idx] == 3:
162 + img = cv2.imread(new_paths[idx])
163 + cv2.imwrite(f"eval_results/main/Flip/{new_paths[idx].split('/')[-1]}", img)
164 +
165 + # Leave
166 + elif output[idx] == 4:
167 + img = cv2.imread(new_paths[idx])
168 + cv2.imwrite(f"eval_results/main/Leave/{new_paths[idx].split('/')[-1]}", img)
169 +
170 + # Pollute
171 + elif output[idx] == 5:
172 + img = cv2.imread(new_paths[idx])
173 + cv2.imwrite(f"eval_results/main/Pollute/{new_paths[idx].split('/')[-1]}", img)
174 +
175 + # Scratch
176 + elif output[idx] == 6:
177 + img = cv2.imread(new_paths[idx])
178 + cv2.imwrite(f"eval_results/main/Scratch/{new_paths[idx].split('/')[-1]}", img)
179 +
180 +
181 +if __name__ == '__main__':
182 + Error_args = get_args_from_yaml("configs/Error_config.yml")
183 + Error_Type_args = get_args_from_yaml("configs/ErrorType_config.yml")
184 + main(Error_args, Error_Type_args)
185 +
...\ No newline at end of file ...\ No newline at end of file
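The long if/elif chain above hard-codes output indices 0-6 as Crack, Double, Empty, Flip, Leave, Pollute, Scratch. That matches ImageFolder's alphabetical class ordering only if the error-type training folders are named exactly that way, so a small check makes the assumption explicit (a sketch, assuming the error-type training set was built with torchvision's ImageFolder):

    import torchvision.datasets as datasets

    # ImageFolder sorts class folders alphabetically; the index->name mapping above relies on this order.
    expected = ['Crack', 'Double', 'Empty', 'Flip', 'Leave', 'Pollute', 'Scratch']
    classes = datasets.ImageFolder(Error_Type_args['data']['train']).classes
    assert classes == expected, f"class order mismatch: {classes}"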
1 +import torch
2 +import torch.nn as nn
3 +from model import mobilenetv3
4 +import argparse
5 +import torchvision
6 +from torchvision.transforms import transforms
7 +import torchvision.datasets as datasets
8 +from augmentations import RandAugment
9 +from get_mean_std import get_params
10 +from torch.utils.data.sampler import SubsetRandomSampler
11 +import numpy as np
12 +import os
13 +import cv2
14 +from utils import MyImageFolder
15 +
16 +class ConcatDataset(torch.utils.data.Dataset):
17 + def __init__(self, *datasets):
18 + self.datasets = datasets
19 +
20 + def __getitem__(self, i):
21 + return tuple(d[i %len(d)] for d in self.datasets)
22 +
23 + def __len__(self):
24 + return max(len(d) for d in self.datasets)
25 +
26 +
27 +def make_dir():
28 + if not os.path.exists('../data/Fourth_data/teacher_data/Double'):
29 + os.mkdir('../data/Fourth_data/teacher_data/Double')
30 +
31 + if not os.path.exists('../data/Fourth_data/teacher_data/Flip'):
32 + os.mkdir('../data/Fourth_data/teacher_data/Flip')
33 +
34 + if not os.path.exists('../data/Fourth_data/teacher_data/Scratch'):
35 + os.mkdir('../data/Fourth_data/teacher_data/Scratch')
36 +
37 + if not os.path.exists('../data/Fourth_data/teacher_data/Leave'):
38 + os.mkdir('../data/Fourth_data/teacher_data/Leave')
39 +
40 + if not os.path.exists('../data/Fourth_data/teacher_data/Normal'):
41 + os.mkdir('../data/Fourth_data/teacher_data/Normal')
42 +
43 + if not os.path.exists('../data/Fourth_data/teacher_data/Empty'):
44 + os.mkdir('../data/Fourth_data/teacher_data/Empty')
45 +
46 +
47 +parser = argparse.ArgumentParser(description='Process make noisy student model')
48 +parser.add_argument('--checkpoint_path', type=str, help='checkpoint path')
49 +parser.add_argument('--size', type=int, help='resize integer of input')
50 +parser.add_argument('--batch_size', type=int, default=256,help='set batch size')
51 +parser.add_argument('--teacher_checkpoint_path', type=str, help='teacher first checkpoint path')
52 +parser.add_argument('--Labeled_dataset_path', default='../data/Fourth_data/noisy_data/Labeled', type=str, help='path of dataset')
53 +parser.add_argument('--Unlabeled_dataset_path', default='../data/Fourth_data/noisy_data/Unlabeled', type=str, help='path of unlabeled dataset')
54 +parser.add_argument('--num_workers', default=8, type=int, help="number of gpu worker")
55 +parser.add_argument('--epochs', default=350, type=int, help='epoch')
56 +parser.add_argument('--finetune_epochs', default=2, type=int, help='finetuning epochs')
57 +parser.add_argument('--data_save_path', default='../data/Fourth_data/teacher_data', type=str, help='teacher save unlabeled data in this path')
58 +args = parser.parse_args()
59 +
60 +print(args)
61 +
62 +# by paper of https://arxiv.org/pdf/1911.04252.pdf
63 +Aug_number = 2
64 +Aug_magnitude = 27
65 +
66 +#my customize network
67 +blocks = [4,5,6,7,8]
68 +
69 +# data loader parameters
70 +kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
71 +
72 +Labeled_mean, Labeled_std = get_params(args.Labeled_dataset_path, args.size)
73 +Unlabeled_mean, Unlabeled_std = get_params(args.Unlabeled_dataset_path, args.size)
74 +
75 +transform_labeled = transforms.Compose([
76 + transforms.Resize((args.size, args.size)),
77 + transforms.RandomCrop(args.size, padding=4),
78 + transforms.RandomHorizontalFlip(),
79 + transforms.ToTensor(),
80 + transforms.Normalize(Labeled_mean[0].item(), Labeled_std[0].item())
81 +])
82 +
83 +# The teacher only scores raw data here, so no separate augmentation is strictly needed
84 +transform_unlabeled = transforms.Compose([
85 + transforms.Resize((args.size, args.size)),
86 + transforms.RandomCrop(args.size, padding=4),
87 + transforms.RandomHorizontalFlip(),
88 + transforms.ToTensor(),
89 + transforms.Normalize(Unlabeled_mean[0].item(), Unlabeled_std[0].item())
90 +])
91 +
92 +# Add RandAugment with N, M(hyperparameter)
93 +transform_labeled.transforms.insert(0, RandAugment(Aug_number, Aug_magnitude))
94 +
95 +# set dataset
96 +Labeled_dataset = datasets.ImageFolder(args.Labeled_dataset_path, transform_labeled)
97 +Unlabeled_dataset = MyImageFolder(args.Unlabeled_dataset_path, transform_unlabeled)
98 +
99 +labeled_data_loader = torch.utils.data.DataLoader(
100 + Labeled_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
101 +
102 +unlabeled_data_loader = torch.utils.data.DataLoader(
103 + Unlabeled_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
104 +
105 +# The noisy teacher is set smaller than the student, with dropout set to 0.
106 +noisy_teacher_model = mobilenetv3(n_class=2, dropout=0.0, blocknum=4)
107 +checkpoint = torch.load(args.teacher_checkpoint_path)
108 +noisy_teacher_model.load_state_dict(checkpoint['state_dict'])
109 +
110 +# make loss function
111 +criterion = nn.CrossEntropyLoss()
112 +
113 +# make class directory
114 +make_dir()
115 +
116 +classes = os.listdir(args.data_save_path)
117 +classes.sort()
118 +
119 +for block in blocks:
120 +    # The noisy student is set larger, with dropout 0.5 as given in the paper.
121 + noisy_student_model = mobilenetv3(n_class=2, dropout=0.5, blocknum=block, stochastic=True)
122 +
123 + noisy_student_model.cuda()
124 + noisy_teacher_model.cuda()
125 + criterion.cuda()
126 +
127 + # make optimizer same as official code lr = 0.128 and decays by 0.97 every 2.4epochs
128 +    optimizer = torch.optim.RMSprop(noisy_student_model.parameters(), lr=0.128, alpha=0.9, momentum=0.9)  # 0.9 is RMSprop's moving-average decay (alpha) in the official code, not an L2 weight decay
129 +
130 + # exp scheduler like tf offical code
131 + scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,0.7)
132 +
133 + for epoch in range(args.epochs):
134 +        # Label the unlabeled data with the current teacher.
135 + for idx, data in enumerate(unlabeled_data_loader):
136 + (unlabeled_input, _), (path, _) = data
137 +
138 + unlabeled_input = unlabeled_input.cuda()
139 +
140 + output=noisy_teacher_model(unlabeled_input)
141 +
142 +            prob = torch.softmax(output, dim=1)  # torch.softmax, since torch.nn.functional is never imported here
143 +
144 + for idx, p in enumerate(prob):
145 + indices = torch.topk(p,1).indices.tolist()
146 +
147 + img = cv2.imread(path[idx])
148 +
149 + cv2.imwrite(f"{args.data_save_path}/{classes[indices[0]]}/{path[idx].split('/')[-1]}", img)
150 +
151 +    # Build a new loader over the data the teacher model just labeled.
152 + transform_teacher_data = transforms.Compose([
153 + transforms.Resize((args.size, args.size)),
154 + transforms.RandomCrop(args.size, padding=4),
155 + transforms.RandomHorizontalFlip(),
156 + transforms.ToTensor(),
157 + transforms.Normalize(Unlabeled_mean[0].item(), Unlabeled_std[0].item())
158 + ])
159 + transform_teacher_data.transforms.insert(0, RandAugment(Aug_number, Aug_magnitude))
160 +
161 + teacher_data = datasets.ImageFolder(args.data_save_path, transform_teacher_data)
162 +
163 + teacher_data_loader = torch.utils.data.DataLoader(
164 + teacher_data, batch_size=args.batch_size, shuffle=True, **kwargs)
165 +
166 +    merged_dataset = ConcatDataset(teacher_data, Labeled_dataset)  # first item: teacher-labeled sample, second: real labeled sample (datasets, not loaders, so ConcatDataset can index them)
167 +
168 + merged_data_loader = torch.utils.data.DataLoader(
169 + merged_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)
170 +
171 +    # For now the teacher's predictions are used as hard labels. TODO: soft labeling.
172 +    for i, ((t_input, t_target), (l_input, l_target)) in enumerate(merged_data_loader):
173 +        input = torch.cat((t_input, l_input), dim=0).cuda()
174 +        target = torch.cat((t_target, l_target), dim=0).cuda()
175 +
176 + output = noisy_student_model(input)
177 +
178 +        loss = criterion(output, target)  # CrossEntropyLoss expects (logits, targets)
179 +
180 + optimizer.zero_grad()
181 + loss.backward()
182 + optimizer.step()
183 +
184 +        # The paper steps the schedule every 2.4 epochs; here the scheduler steps every 2 epochs.
185 + if epoch % 2 == 0:
186 + scheduler.step()
187 +
188 + # iterative learning.
189 + noisy_teacher_model = noisy_student_model
...\ No newline at end of file ...\ No newline at end of file
1 +from model import mobilenetv3
2 +import torch
3 +import torch.nn as nn
4 +
5 +model = mobilenetv3(n_class=8, blocknum=6)
6 +
7 +model = torch.nn.DataParallel(model)
8 +device = torch.device('cpu')
9 +checkpoint = torch.load('output/All/48860_model=MobilenetV3-ep=3000-block=6-class=8/model_best.pth.tar', map_location = device)
10 +
11 +model.load_state_dict(checkpoint['state_dict'])
12 +
13 +model.to(device)
14 +
15 +model.eval()
16 +
17 +x = torch.randn(256,1,224,224)
18 +
19 +print(x.shape)
20 +
21 +jit_model = torch.jit.trace(model.module,x)
22 +
23 +jit_model.save("mobilenetv3.pt")
24 +
25 +#check jitModel is working
26 +#output = jit_model(torch.ones(3,1,224,224))
27 +#print(output)
...\ No newline at end of file ...\ No newline at end of file
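A quick way to confirm the exported file is usable without the original model package is to load it back with torch.jit.load; a minimal check (the input shape mirrors the trace above, and the 8-way output follows from n_class=8):

    import torch

    # The traced module is self-contained, so this does not need `from model import mobilenetv3`.
    jit_model = torch.jit.load('mobilenetv3.pt')
    with torch.no_grad():
        out = jit_model(torch.randn(1, 1, 224, 224))
    print(out.shape)   # expected: torch.Size([1, 8])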
1 +adabound==0.0.5
2 +altgraph==0.17
3 +click==7.1.1
4 +cycler==0.10.0
5 +future==0.18.2
6 +kiwisolver==1.1.0
7 +lxml==4.5.0
8 +macholib==1.14
9 +matplotlib==2.2.4
10 +numpy==1.18.1
11 +opencv-python==4.2.0.32
12 +pandas==1.0.3
13 +pefile==2019.4.18
14 +Pillow==6.2.2
15 +protobuf==3.11.3
16 +PyInstaller==3.6
17 +pyparsing==2.4.6
18 +PyQt5==5.14.1
19 +PyQt5-sip==12.7.1
20 +python-dateutil==2.8.1
21 +pytz==2019.3
22 +pywin32-ctypes==0.2.0
23 +PyYAML==5.3.1
24 +scipy==1.4.1
25 +six==1.14.0
26 +tensorboard-logger==0.1.0
27 +torch==1.4.0+cpu
28 +torchvision==0.2.2.post3
29 +tqdm==4.44.1