Showing 106 changed files with 9,122 additions and 0 deletions
UI.py
0 → 100644
1 | +import torch | ||
2 | +import torch.nn as nn | ||
3 | +from model import mobilenetv3 | ||
4 | +from PyQt5.QtCore import pyqtSignal, QThread | ||
5 | +from PyQt5 import QtCore, QtGui, QtWidgets | ||
6 | +from train import UI_train | ||
7 | +from test import UI_validate, UI_test, UI_temp | ||
8 | +import threading, _thread | ||
9 | +import time | ||
10 | +import os | ||
11 | +from queue import Queue | ||
12 | +import multiprocessing | ||
13 | + | ||
14 | +import logging | ||
15 | + | ||
# Worker threads started from the UI (the Train button appends here).
threads = []
# Shared application logger; its handler is attached in Ui_Dialog.setupUi.
logger = logging.getLogger('Techwing_log')
18 | + | ||
######### custom slider that also draws a text label at every tick #########
class LabeledSlider(QtWidgets.QWidget):
    """QSlider wrapper that paints each tick's label next to the slider,
    growing its own margins whenever a label would be clipped."""

    def __init__(self, minimum=1, maximum=11, start_value=4, interval=1, orientation=QtCore.Qt.Horizontal,
                 labels=None, p0=4, parent=None):
        super(LabeledSlider, self).__init__(parent=parent)

        # tick values from minimum to maximum inclusive, stepping by interval
        levels=range(minimum, maximum + interval, interval)
        if labels is not None:
            if not isinstance(labels, (tuple, list)):
                raise Exception("<labels> is a list or tuple.")
            if len(labels) != len(levels):
                raise Exception("Size of <labels> doesn't match levels.")
            self.levels=list(zip(levels,labels))
        else:
            # default: label every tick with its numeric value
            self.levels=list(zip(levels,map(str,levels)))

        if orientation==QtCore.Qt.Horizontal:
            self.layout=QtWidgets.QVBoxLayout(self)
        elif orientation==QtCore.Qt.Vertical:
            self.layout=QtWidgets.QHBoxLayout(self)
        else:
            raise Exception("<orientation> wrong.")

        # gives some space to print labels
        self.left_margin=10
        self.top_margin=10
        self.right_margin=10
        self.bottom_margin=10

        self.layout.setContentsMargins(self.left_margin,self.top_margin,
                                       self.right_margin,self.bottom_margin)

        self.sl=QtWidgets.QSlider(orientation, self)
        self.sl.setMinimum(minimum)
        self.sl.setMaximum(maximum)
        self.sl.setValue(start_value)
        self.sl.setSliderPosition(p0)
        if orientation==QtCore.Qt.Horizontal:
            self.sl.setTickPosition(QtWidgets.QSlider.TicksBelow)
            self.sl.setMinimumWidth(300) # just to make it easier to read
        else:
            self.sl.setTickPosition(QtWidgets.QSlider.TicksLeft)
            self.sl.setMinimumHeight(80) # just to make it easier to read
        self.sl.setTickInterval(interval)
        self.sl.setSingleStep(1)

        self.layout.addWidget(self.sl)

    def paintEvent(self, e):
        """Paint the widget, then draw each level's label at its tick position,
        enlarging the layout margins when a label would be clipped."""
        super(LabeledSlider,self).paintEvent(e)
        style=self.sl.style()
        painter=QtGui.QPainter(self)
        st_slider=QtWidgets.QStyleOptionSlider()
        st_slider.initFrom(self.sl)
        st_slider.orientation=self.sl.orientation()

        length=style.pixelMetric(QtWidgets.QStyle.PM_SliderLength, st_slider, self.sl)
        available=style.pixelMetric(QtWidgets.QStyle.PM_SliderSpaceAvailable, st_slider, self.sl)

        for v, v_str in self.levels:
            # get the size of the label (TextDontPrint measures without drawing)
            rect=painter.drawText(QtCore.QRect(), QtCore.Qt.TextDontPrint, v_str)

            if self.sl.orientation()==QtCore.Qt.Horizontal:
                # I assume the offset is half the length of slider, therefore
                # + length//2
                x_loc=QtWidgets.QStyle.sliderPositionFromValue(self.sl.minimum(),
                        self.sl.maximum(), v, available)+length//2

                # left bound of the text = center - half of text width + L_margin
                left=x_loc-rect.width()//2+self.left_margin
                bottom=self.rect().bottom()

                # enlarge margins if clipping
                if v==self.sl.minimum():
                    if left<=0:
                        self.left_margin=rect.width()//2-x_loc
                    if self.bottom_margin<=rect.height():
                        self.bottom_margin=rect.height()

                    self.layout.setContentsMargins(self.left_margin,
                            self.top_margin, self.right_margin,
                            self.bottom_margin)

                if v==self.sl.maximum() and rect.width()//2>=self.right_margin:
                    self.right_margin=rect.width()//2
                    self.layout.setContentsMargins(self.left_margin,
                            self.top_margin, self.right_margin,
                            self.bottom_margin)

            else:
                y_loc=QtWidgets.QStyle.sliderPositionFromValue(self.sl.minimum(),
                        self.sl.maximum(), v, available, upsideDown=True)

                bottom=y_loc+length//2+rect.height()//2+self.top_margin-3
                # there is a 3 px offset that I can't attribute to any metric

                left=self.left_margin-rect.width()
                if left<=0:
                    self.left_margin=rect.width()+2
                    self.layout.setContentsMargins(self.left_margin,
                            self.top_margin, self.right_margin,
                            self.bottom_margin)

            pos=QtCore.QPoint(left, bottom)
            painter.drawText(pos, v_str)

        return
128 | + | ||
class BaseThread(threading.Thread):
    """Thread that runs *target* and then invokes *callback*.

    Used by the UI to re-enable buttons when a training/testing job ends.
    Two fixes over the naive version:
    - ``callback_args`` defaults to ``()``: the old ``None`` default made
      ``self.callback(*self.callback_args)`` raise TypeError when a caller
      supplied a callback but no args.
    - the callback runs in a ``finally`` block, so a crashing target can no
      longer leave the UI buttons permanently disabled.
    """

    def __init__(self, callback=None, callback_args=None, *args, **kwargs):
        target = kwargs.pop('target')
        super(BaseThread, self).__init__(target=self.target_with_callback, *args, **kwargs)
        self.callback = callback
        self.method = target
        self.callback_args = callback_args if callback_args is not None else ()

    def target_with_callback(self, *args, **kwargs):
        # Run the real target; guarantee the callback fires afterwards.
        try:
            self.method(*args, **kwargs)
        finally:
            if self.callback is not None:
                self.callback(*self.callback_args)
141 | + | ||
# Logging handler that mirrors every record into the dialog's text pane.
class QTextEditLogger(logging.Handler):
    def __init__(self, parent):
        super().__init__()
        self.widget = QtWidgets.QTextEdit()
        parent.addWidget(self.widget)
        self.widget.setReadOnly(True)

    def emit(self, record):
        # Append the formatted record and keep the view scrolled to the end.
        text = self.format(record)
        self.widget.append(text)
        QtGui.QGuiApplication.processEvents()
        self.widget.moveCursor(QtGui.QTextCursor.End)
155 | + | ||
# Adding dialog for closeevent.
class Dialog_form(QtWidgets.QDialog):
    def __init__(self, parent=None):
        super(Dialog_form, self).__init__(parent)

    def closeEvent(self, evnt):
        # Forward the close event, then raise KeyboardInterrupt in the main
        # thread so background work does not keep the process alive.
        super(Dialog_form, self).closeEvent(evnt)
        _thread.interrupt_main()
164 | + | ||
165 | +# main Dialog | ||
166 | +class Ui_Dialog(QtWidgets.QWidget): | ||
    def setupUi(self, Dialog):
        """Build the whole dialog: default model, action buttons, path
        pickers, training-parameter inputs, mode radios, and the log pane."""
        ######### default values #########
        self.mode = "Error"
        self.q = Queue()  # message queue drained by the background log thread
        self.use_checkpoint=False

        ######### default model so the UI works before a checkpoint is picked #########
        self.model = mobilenetv3(n_class=2, blocknum=4, dropout=0.5)
        if torch.cuda.is_available():
            torch.cuda.set_device(0)
            with torch.cuda.device(0):
                self.model = self.model.cuda()
            self.model = torch.nn.DataParallel(self.model, device_ids=[0], output_device=[0]) # DataParallel scatters the model across GPUs and gathers gradients on one device, so GPU 0 normally holds the most memory.
            # Filling one GPU caps the batch size, so output_device is set explicitly to control where gathering happens.
            checkpoint = torch.load("output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar") # the data here is small, so everything is handled on device 0
        else:
            self.model = torch.nn.DataParallel(self.model)
            device = torch.device("cpu")
            self.model.to(device)
            checkpoint = torch.load("output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar", map_location=torch.device('cpu'))

        self.model.load_state_dict(checkpoint['state_dict'])


        ######### dialog setup and log/button frame declarations #########
        Dialog.resize(1500, 900)
        Dialog.setObjectName("Dialog")
        hbox = QtWidgets.QHBoxLayout(Dialog)
        logframe = QtWidgets.QFrame(self)
        buttonframe = QtWidgets.QFrame(self)

        logframe.setFrameShape(QtWidgets.QFrame.StyledPanel)
        buttonframe.setFrameShape(QtWidgets.QFrame.StyledPanel)

        logLayout = QtWidgets.QVBoxLayout()
        buttonLayout = QtWidgets.QVBoxLayout()

        ######### buttons #########
        # Train
        self.pushButton = QtWidgets.QPushButton()
        self.pushButton.setFixedHeight(50)

        # Validation
        self.pushButton_2 = QtWidgets.QPushButton()
        self.pushButton_2.setFixedHeight(50)

        # Test (dir)
        self.pushButton_3 = QtWidgets.QPushButton()
        self.pushButton_3.setFixedHeight(50)

        # Test (file)
        self.pushButton_4 = QtWidgets.QPushButton("Test (file)")
        self.pushButton_4.setFixedHeight(50)

        # Temp Test for consistent model
        self.pushButton_5 = QtWidgets.QPushButton("Temp Test")
        self.pushButton_5.setFixedHeight(50)

        ######### model-run button row #########
        model_control_layout = QtWidgets.QHBoxLayout()
        model_control_layout.addWidget(self.pushButton)
        model_control_layout.addWidget(self.pushButton_2)
        model_control_layout.addWidget(self.pushButton_3)
        model_control_layout.addWidget(self.pushButton_4)
        #model_control_layout.addWidget(self.pushButton_5)

        self.model_control_container = QtWidgets.QWidget()
        self.model_control_container.setLayout(model_control_layout)
        self.model_control_container.setFixedHeight(60)

        ######### data path UI (directory) #########
        self.dirpathlabel = QtWidgets.QLabel("data path (dir):")
        self.dirselectedpath = QtWidgets.QLineEdit("no data")
        self.dirselectedpath.setReadOnly(True)
        self.data_dir_select_btn = QtWidgets.QPushButton("...")

        self.dirpathlayout = QtWidgets.QHBoxLayout()
        self.dirpathlayout.addWidget(self.dirpathlabel)
        self.dirpathlayout.addWidget(self.dirselectedpath)
        self.dirpathlayout.addWidget(self.data_dir_select_btn)

        self.dirpathcontainer = QtWidgets.QWidget()
        self.dirpathcontainer.setLayout(self.dirpathlayout)
        self.dirpathcontainer.setFixedHeight(40)

        ######### data path UI (file) #########
        self.filepathlabel = QtWidgets.QLabel("data path (file):")
        self.fileselectedpath = QtWidgets.QLineEdit("no data")
        self.fileselectedpath.setReadOnly(True)
        self.data_file_select_btn = QtWidgets.QPushButton("...")

        self.filepathlayout = QtWidgets.QHBoxLayout()
        self.filepathlayout.addWidget(self.filepathlabel)
        self.filepathlayout.addWidget(self.fileselectedpath)
        self.filepathlayout.addWidget(self.data_file_select_btn)

        self.filepathcontainer = QtWidgets.QWidget()
        self.filepathcontainer.setLayout(self.filepathlayout)
        self.filepathcontainer.setFixedHeight(40)

        ######### checkpoint path UI #########
        self.ck_pathlabel = QtWidgets.QLabel("checkpoint path :")
        self.ck_selectedpath = QtWidgets.QLineEdit("no checkpoint")
        self.ck_selectedpath.setReadOnly(True)
        self.ck_select_btn = QtWidgets.QPushButton("...")

        self.ck_pathlayout = QtWidgets.QHBoxLayout()
        self.ck_pathlayout.addWidget(self.ck_pathlabel)
        self.ck_pathlayout.addWidget(self.ck_selectedpath)
        self.ck_pathlayout.addWidget(self.ck_select_btn)

        self.ck_pathcontainer = QtWidgets.QWidget()
        self.ck_pathcontainer.setLayout(self.ck_pathlayout)
        self.ck_pathcontainer.setFixedHeight(40)

        ######### block-count slider #########
        self.blocknum_slider = LabeledSlider()

        ######### group the path widgets into one group box #########
        self.path_groupbox = QtWidgets.QGroupBox("경로")
        self.path_layout = QtWidgets.QVBoxLayout()
        self.path_layout.addWidget(self.dirpathcontainer)
        self.path_layout.addWidget(self.filepathcontainer)
        self.path_layout.addWidget(self.ck_pathcontainer)
        self.path_groupbox.setLayout(self.path_layout)
        self.path_groupbox.setFixedHeight(140)

        ######### group the model-parameter widgets #########
        self.model_groupbox = QtWidgets.QGroupBox("모델 블록")
        self.blocknum_layout = QtWidgets.QVBoxLayout()
        self.blocknum_layout.addWidget(self.blocknum_slider)
        self.model_groupbox.setLayout(self.blocknum_layout)
        self.model_groupbox.setFixedHeight(80)

        ######### model mode UI ######### (All, Error, ErrorType)
        self.modelayout = QtWidgets.QHBoxLayout()
        self.Errorbtn = QtWidgets.QRadioButton("에러 검출")
        self.Typebtn = QtWidgets.QRadioButton("에러 타입")
        self.Allbtn = QtWidgets.QRadioButton("전체 타입 검출")
        self.Errorbtn.setChecked(True)

        self.modelayout.addWidget(self.Errorbtn)
        self.modelayout.addWidget(self.Typebtn)
        self.modelayout.addWidget(self.Allbtn)
        self.modecontainer = QtWidgets.QGroupBox("모델 종류")
        self.modecontainer.setLayout(self.modelayout)
        self.modecontainer.setFixedHeight(70)

        ######### training-parameter UI #########
        self.train_parameters_layout = QtWidgets.QHBoxLayout()
        self.epoch_label = QtWidgets.QLabel("Epoch :")
        self.epoch_input = QtWidgets.QLineEdit("3000")
        self.epoch_input.setValidator(QtGui.QIntValidator(1,3001))

        self.optim_label = QtWidgets.QLabel("Optim :")
        self.optim_input = QtWidgets.QComboBox()
        self.optim_input.addItem("SGD")
        self.optim_input.addItem("Adam")
        self.optim_input.setFixedWidth(100)

        self.lr_label = QtWidgets.QLabel("Learning rate :")
        self.lr_input = QtWidgets.QLineEdit("0.001")
        self.lr_input.setValidator(QtGui.QDoubleValidator(999999, -999999, 8))

        self.batch_label = QtWidgets.QLabel("batch size :")
        self.batch_input = QtWidgets.QLineEdit("256")
        self.batch_input.setValidator(QtGui.QIntValidator(1,1025))

        self.imagesize_label = QtWidgets.QLabel("Image size :")
        self.imagesize_input = QtWidgets.QLineEdit("64")
        self.imagesize_input.setValidator(QtGui.QIntValidator(1,1025))

        self.train_parameters_layout.addWidget(self.epoch_label)
        self.train_parameters_layout.addWidget(self.epoch_input)

        self.train_parameters_layout.addWidget(self.optim_label)
        self.train_parameters_layout.addWidget(self.optim_input)

        self.train_parameters_layout.addWidget(self.lr_label)
        self.train_parameters_layout.addWidget(self.lr_input)

        self.train_parameters_layout.addWidget(self.batch_label)
        self.train_parameters_layout.addWidget(self.batch_input)

        self.train_parameters_layout.addWidget(self.imagesize_label)
        self.train_parameters_layout.addWidget(self.imagesize_input)

        self.train_parameters = QtWidgets.QGroupBox("학습 파라미터")
        self.train_parameters.setLayout(self.train_parameters_layout)
        self.train_parameters.setFixedHeight(60)

        ######### add the assembled containers to the dialog #########
        buttonLayout.addWidget(self.model_control_container)

        buttonLayout.addWidget(self.path_groupbox)

        buttonLayout.addWidget(self.model_groupbox)

        buttonLayout.addWidget(self.train_parameters)

        buttonLayout.addWidget(self.modecontainer)

        ######### logger format ######### (separate logger from the one saved to file, so on-screen output differs from the stored log)
        logTextBox = QTextEditLogger(logLayout)
        logTextBox.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
        logging.getLogger('Techwing_log').addHandler(logTextBox)
        logging.getLogger('Techwing_log').setLevel(logging.INFO)

        ######### log-widget vs button-widget size ratio #########
        logframe.setLayout(logLayout)
        buttonframe.setLayout(buttonLayout)
        splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        splitter.addWidget(logframe)
        splitter.addWidget(buttonframe)
        splitter.setSizes([600,200])

        hbox.addWidget(splitter)
        Dialog.setLayout(hbox)
        QtWidgets.QApplication.setStyle(QtWidgets.QStyleFactory.create('Cleanlooks'))

        # set button captions
        self.retranslateUi(Dialog)

        ######### wire up button signals #########
        self.Errorbtn.clicked.connect(self.modeBtnClicked)
        self.Typebtn.clicked.connect(self.modeBtnClicked)
        self.Allbtn.clicked.connect(self.modeBtnClicked)
        self.pushButton.clicked.connect(self.Train_btn_clicked)
        self.pushButton_2.clicked.connect(self.Val_btn_clicked)
        self.pushButton_3.clicked.connect(lambda: self.Test_btn_clicked('dir'))
        self.pushButton_4.clicked.connect(lambda: self.Test_btn_clicked('file'))
        self.pushButton_5.clicked.connect(self.temp_btn_clicked)
        self.data_dir_select_btn.clicked.connect(self.Path_btn_clicked)
        self.data_file_select_btn.clicked.connect(self.filePath_btn_clicked)
        self.ck_select_btn.clicked.connect(self.checkpoint_btn_clicked)

        ######### daemon thread that pumps queued messages into the log #########
        c = threading.Thread(target=self.write, args=(self.q,), daemon=True)
        c.start()

        QtCore.QMetaObject.connectSlotsByName(Dialog)
408 | + ######### 버튼 UI 설정 ######### | ||
409 | + def retranslateUi(self, Dialog): | ||
410 | + _translate = QtCore.QCoreApplication.translate | ||
411 | + Dialog.setWindowTitle(_translate("Dialog", "Dialog")) | ||
412 | + self.pushButton.setText(_translate("Dialog", "Train")) | ||
413 | + self.pushButton_2.setText(_translate("Dialog", "Validate")) | ||
414 | + self.pushButton_3.setText(_translate("Dialog", "Test (dir)")) | ||
415 | + | ||
416 | + ######### Radio 버튼 동작 이벤트 처리 ######### | ||
417 | + def modeBtnClicked(self): | ||
418 | + if self.Errorbtn.isChecked(): | ||
419 | + self.q.put("set error") | ||
420 | + self.mode = "Error" | ||
421 | + self.blocknum_slider.sl.setValue(4) | ||
422 | + self.blocknum_slider.sl.setSliderPosition(4) | ||
423 | + self.imagesize_input.setText("64") | ||
424 | + elif self.Typebtn.isChecked(): | ||
425 | + self.q.put("set Type") | ||
426 | + self.mode = "Type" | ||
427 | + self.blocknum_slider.sl.setValue(4) | ||
428 | + self.blocknum_slider.sl.setSliderPosition(4) | ||
429 | + self.imagesize_input.setText("64") | ||
430 | + else: | ||
431 | + self.q.put("set All processing") | ||
432 | + self.mode = "All" | ||
433 | + self.blocknum_slider.sl.setValue(6) | ||
434 | + self.blocknum_slider.sl.setSliderPosition(6) | ||
435 | + self.imagesize_input.setText("224") | ||
436 | + | ||
437 | + ######### Train 버튼 동작 이벤트 ######### | ||
438 | + def Train_btn_clicked(self): | ||
439 | + if self.dirselectedpath.text() != "no data": | ||
440 | + self.set_all_btn_enabled(False) | ||
441 | + logging.info("train start") | ||
442 | + | ||
443 | + blocknum = self.blocknum_slider.sl.value() | ||
444 | + kwargs = {"resume": self.use_checkpoint, "blocknum": blocknum} | ||
445 | + kwargs["data_path"] = self.dirselectedpath.text() | ||
446 | + kwargs["epoch"] = int(self.epoch_input.text()) | ||
447 | + kwargs["lr"] = float(self.lr_input.text()) | ||
448 | + kwargs["batch_size"] = int(self.batch_input.text()) | ||
449 | + kwargs["optim"] = str(self.optim_input.currentText()) | ||
450 | + kwargs["size"] = int(self.imagesize_input.text()) | ||
451 | + | ||
452 | + if self.use_checkpoint: | ||
453 | + kwargs["ck_path"] = self.ck_selectedpath.text() | ||
454 | + | ||
455 | + t = BaseThread(target=UI_train, callback=self.set_all_btn_enabled, callback_args=(True,), | ||
456 | + args=(self.mode, self.q), kwargs=kwargs) | ||
457 | + threads.append(t) | ||
458 | + t.start() | ||
459 | + self.q.join() | ||
460 | + else: | ||
461 | + self.q.put("데이터를 입력해 주세요.") | ||
462 | + | ||
463 | + ######### Test 버튼 동작 이벤트 ######### | ||
464 | + def Test_btn_clicked(self, file_mode): | ||
465 | + if self.use_checkpoint: | ||
466 | + self.set_all_btn_enabled(False) | ||
467 | + logging.info('Test start') | ||
468 | + blocknum = self.blocknum_slider.sl.value() | ||
469 | + kwargs = {"use_ck": self.use_checkpoint, "blocknum": blocknum} | ||
470 | + kwargs["size"] = int(self.imagesize_input.text()) | ||
471 | + kwargs["ck_path"] = self.ck_selectedpath.text() | ||
472 | + logging.info(f"start test using path : {self.ck_selectedpath.text()}") | ||
473 | + | ||
474 | + if file_mode == 'dir': | ||
475 | + if self.dirselectedpath.text() != "no data": | ||
476 | + t = BaseThread(target=UI_test, callback=self.set_all_btn_enabled, callback_args=(True,) | ||
477 | + ,args=(self.mode, self.dirselectedpath.text(), file_mode, self.q), kwargs=kwargs) | ||
478 | + t.start() | ||
479 | + self.q.join() | ||
480 | + else: | ||
481 | + self.q.put("데이터를 입력해 주세요.") | ||
482 | + self.set_all_btn_enabled(True) | ||
483 | + else: | ||
484 | + if self.fileselectedpath.text() != "no data": | ||
485 | + t = BaseThread(target=UI_test, callback=self.set_all_btn_enabled, callback_args=(True,) | ||
486 | + ,args=(self.mode, self.fileselectedpath.text(), file_mode, self.q), kwargs=kwargs) | ||
487 | + t.start() | ||
488 | + self.q.join() | ||
489 | + else: | ||
490 | + self.q.put("데이터를 입력해 주세요.") | ||
491 | + self.set_all_btn_enabled(True) | ||
492 | + else: | ||
493 | + self.q.put("체크포인트를 입력해 주세요.") | ||
494 | + | ||
495 | + ######### Validation 버튼 동작 이벤트 ######### | ||
496 | + ## path가 설정되어 있어야된다. | ||
497 | + def Val_btn_clicked(self): | ||
498 | + if self.use_checkpoint: | ||
499 | + if self.dirselectedpath.text() != "no data": | ||
500 | + self.set_all_btn_enabled(False) | ||
501 | + blocknum = self.blocknum_slider.sl.value() | ||
502 | + | ||
503 | + kwargs = {"blocknum": blocknum} | ||
504 | + kwargs["data_path"] = self.dirselectedpath.text() | ||
505 | + kwargs["size"] = int(self.imagesize_input.text()) | ||
506 | + kwargs["ck_path"] = self.ck_selectedpath.text() | ||
507 | + | ||
508 | + logging.info('val start') | ||
509 | + t = BaseThread(target=UI_validate, callback=self.set_all_btn_enabled, callback_args=(True,), | ||
510 | + args=(self.mode, self.q), kwargs=kwargs) | ||
511 | + t.start() | ||
512 | + self.q.join() | ||
513 | + else: | ||
514 | + self.q.put("데이터를 입력해 주세요.") | ||
515 | + else: | ||
516 | + self.q.put("체크포인트를 입력해 주세요.") | ||
517 | + | ||
518 | + def temp_btn_clicked(self): | ||
519 | + self.set_all_btn_enabled(False) | ||
520 | + | ||
521 | + t = BaseThread(target=UI_temp, callback=self.set_all_btn_enabled, callback_args=(True,), | ||
522 | + args=(self.fileselectedpath.text(), self.q, self.model.module)) | ||
523 | + t.start() | ||
524 | + self.q.join() | ||
525 | + | ||
526 | + | ||
527 | + | ||
528 | + ######### 데이터 디렉토리 선택 이벤트 ######### | ||
529 | + def Path_btn_clicked(self): | ||
530 | + fname = QtWidgets.QFileDialog.getExistingDirectory(self, 'Open dir') | ||
531 | + if len(fname) != 0: | ||
532 | + logging.info(f"{fname} Test dir submitted") | ||
533 | + self.dirselectedpath.setText(fname) | ||
534 | + else: | ||
535 | + QtWidgets.QMessageBox.about(self, "Warning", "Do not select Directory!") | ||
536 | + | ||
537 | + ######### 데이터 파일 선택 이벤트 ######### | ||
538 | + def filePath_btn_clicked(self): | ||
539 | + fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', "", | ||
540 | + "All Files(*);; Bitmap files(*.bmp);; Jpg files(*.jpg);; Png files(*.png)") | ||
541 | + if fname[0]: | ||
542 | + logging.info(f"{fname[0]} test file submitted") | ||
543 | + self.fileselectedpath.setText(fname[0]) | ||
544 | + | ||
545 | + else: | ||
546 | + QtWidgets.QMessageBox.about(self, "Warning", "do not select file!") | ||
547 | + | ||
548 | + ######### 체크포인트 파일 선택 이벤트 ######### | ||
549 | + def checkpoint_btn_clicked(self): | ||
550 | + fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', "", | ||
551 | + "All Files(*)") | ||
552 | + if fname[0]: | ||
553 | + self.ck_path = fname | ||
554 | + self.use_checkpoint = True | ||
555 | + logging.info(f"{fname[0]} checkpoint file submitted") | ||
556 | + self.ck_selectedpath.setText(fname[0]) | ||
557 | + | ||
558 | + else: | ||
559 | + QtWidgets.QMessageBox.about(self, "Warning", "do not select file!") | ||
560 | + | ||
561 | + ######### 딥러닝 모델이 작동하였을 때 다른 버튼을 누르면 안되므로 버튼 제어 ######### | ||
562 | + def set_all_btn_enabled(self, mode): | ||
563 | + self.pushButton.setEnabled(mode) | ||
564 | + self.pushButton_2.setEnabled(mode) | ||
565 | + self.pushButton_3.setEnabled(mode) | ||
566 | + self.pushButton_4.setEnabled(mode) | ||
567 | + self.data_dir_select_btn.setEnabled(mode) | ||
568 | + self.data_file_select_btn.setEnabled(mode) | ||
569 | + self.ck_select_btn.setEnabled(mode) | ||
570 | + | ||
571 | + ######### Dialog 로그창 입력 함수 ######### | ||
572 | + def write(self, q): | ||
573 | + while True: | ||
574 | + try: | ||
575 | + log = q.get() | ||
576 | + logger.info(log) | ||
577 | + q.task_done() | ||
578 | + except Queue.Empty: | ||
579 | + pass | ||
580 | + | ||
if __name__ == "__main__":
    import sys

    # freeze_support is a no-op outside frozen Windows executables.
    multiprocessing.freeze_support()
    app = QtWidgets.QApplication(sys.argv)
    main_dialog = Dialog_form()
    ui = Ui_Dialog()
    ui.setupUi(main_dialog)
    main_dialog.show()
    sys.exit(app.exec_())
__init__.py
0 → 100644
File mode changed
__pycache__/MyImageFolder.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/__init__.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/densenet.cpython-36.pyc
0 → 100644
No preview for this file type
No preview for this file type
__pycache__/focal_loss.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/get_mean_std.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/grad_cam.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/make_noisy.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/model.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/models.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/see_model.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/test.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/train.cpython-36.pyc
0 → 100644
No preview for this file type
__pycache__/train.cpython-37.pyc
0 → 100644
No preview for this file type
__pycache__/utils.cpython-36.pyc
0 → 100644
No preview for this file type
augmentations.py
0 → 100644
1 | +# code in this file is adpated from rpmcruz/autoaugment | ||
2 | +# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py | ||
3 | +import random | ||
4 | + | ||
5 | +import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw | ||
6 | +import numpy as np | ||
7 | +import torch | ||
8 | +from PIL import Image | ||
9 | + | ||
10 | + | ||
def ShearX(img, v):  # [-0.3, 0.3]
    """Shear *img* horizontally by factor v; the sign is chosen at random."""
    assert -0.3 <= v <= 0.3
    shear = -v if random.random() > 0.5 else v
    return img.transform(img.size, PIL.Image.AFFINE, (1, shear, 0, 0, 1, 0))
16 | + | ||
17 | + | ||
def ShearY(img, v):  # [-0.3, 0.3]
    """Shear *img* vertically by factor v; the sign is chosen at random."""
    assert -0.3 <= v <= 0.3
    shear = -v if random.random() > 0.5 else v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, shear, 1, 0))
23 | + | ||
24 | + | ||
def TranslateX(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate horizontally by fraction v of the image width (random sign)."""
    assert -0.45 <= v <= 0.45
    shift = (-v if random.random() > 0.5 else v) * img.size[0]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, shift, 0, 1, 0))
31 | + | ||
32 | + | ||
def TranslateXabs(img, v):  # absolute pixel shift, v >= 0
    """Translate horizontally by v pixels; the direction is chosen at random."""
    assert 0 <= v
    shift = -v if random.random() > 0.5 else v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, shift, 0, 1, 0))
38 | + | ||
39 | + | ||
def TranslateY(img, v):  # [-150, 150] => percentage: [-0.45, 0.45]
    """Translate vertically by fraction v of the image height (random sign)."""
    assert -0.45 <= v <= 0.45
    shift = (-v if random.random() > 0.5 else v) * img.size[1]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, shift))
46 | + | ||
47 | + | ||
def TranslateYabs(img, v):  # absolute pixel shift, v >= 0
    """Translate vertically by v pixels; the direction is chosen at random."""
    assert 0 <= v
    shift = -v if random.random() > 0.5 else v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, shift))
53 | + | ||
54 | + | ||
def Rotate(img, v):  # [-30, 30]
    """Rotate by v degrees; the direction is chosen at random."""
    assert -30 <= v <= 30
    angle = -v if random.random() > 0.5 else v
    return img.rotate(angle)
60 | + | ||
61 | + | ||
def AutoContrast(img, _):
    """Maximize image contrast; the magnitude argument is ignored."""
    return PIL.ImageOps.autocontrast(img)
64 | + | ||
65 | + | ||
def Invert(img, _):
    """Invert all pixel values; the magnitude argument is ignored."""
    return PIL.ImageOps.invert(img)
68 | + | ||
69 | + | ||
def Equalize(img, _):
    """Equalize the image histogram; the magnitude argument is ignored."""
    return PIL.ImageOps.equalize(img)
72 | + | ||
73 | + | ||
def Flip(img, _):  # not from the paper
    """Mirror the image left-to-right; the magnitude argument is ignored."""
    return PIL.ImageOps.mirror(img)
76 | + | ||
77 | + | ||
def Solarize(img, v):  # [0, 256]
    """Invert all pixel values above the threshold v."""
    assert 0 <= v <= 256
    return PIL.ImageOps.solarize(img, v)
81 | + | ||
82 | + | ||
def SolarizeAdd(img, addition=0, threshold=128):
    """Add *addition* to every pixel (clipped to [0, 255]), then solarize.

    Fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24, so the
    original ``astype(np.int)`` crashes on current NumPy; the builtin ``int``
    is the documented replacement and behaves identically here.
    """
    img_np = np.array(img).astype(int)
    img_np = np.clip(img_np + addition, 0, 255).astype(np.uint8)
    return PIL.ImageOps.solarize(Image.fromarray(img_np), threshold)
90 | + | ||
91 | + | ||
def Posterize(img, v):  # [4, 8]
    """Reduce each channel to v bits, clamped to at least 1 bit."""
    bits = max(1, int(v))
    return PIL.ImageOps.posterize(img, bits)
96 | + | ||
97 | + | ||
def Contrast(img, v):  # [0.1, 1.9]
    """Scale image contrast by factor v (1.0 = unchanged)."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Contrast(img).enhance(v)
101 | + | ||
102 | + | ||
def Color(img, v):  # [0.1, 1.9]
    """Scale color saturation by factor v (1.0 = unchanged)."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Color(img).enhance(v)
106 | + | ||
107 | + | ||
def Brightness(img, v):  # [0.1, 1.9]
    """Scale brightness by factor v (1.0 = unchanged)."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Brightness(img).enhance(v)
111 | + | ||
112 | + | ||
def Sharpness(img, v):  # [0.1,1.9]
    """Scale sharpness by factor ``v`` (1.0 = unchanged)."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Sharpness(img).enhance(v)
116 | + | ||
117 | + | ||
def Cutout(img, v):  # [0, 60] => percentage: [0, 0.2]
    """Erase a random square whose side is fraction ``v`` of the image width."""
    assert 0.0 <= v <= 0.2
    if v <= 0.:
        return img

    side = v * img.size[0]
    return CutoutAbs(img, side)
125 | + | ||
126 | + | ||
def CutoutAbs(img, v):  # [0, 60] => percentage: [0, 0.2]
    """Paint a grey square of side ``v`` pixels at a random location."""
    if v < 0:
        return img
    w, h = img.size
    # Pick a random center, then clamp the box to the image bounds.
    cx = np.random.uniform(w)
    cy = np.random.uniform(h)

    x0 = int(max(0, cx - v / 2.))
    y0 = int(max(0, cy - v / 2.))
    box = (x0, y0, min(w, x0 + v), min(h, y0 + v))

    # Grey fill close to ImageNet's mean pixel.
    patched = img.copy()
    PIL.ImageDraw.Draw(patched).rectangle(box, (125, 123, 114))
    return patched
146 | + | ||
147 | + | ||
def SamplePairing(imgs):  # [0, 0.4]
    """Return an op that blends its input with a random image from *imgs*."""
    def blend_op(img1, v):
        idx = np.random.choice(len(imgs))
        partner = PIL.Image.fromarray(imgs[idx])
        return PIL.Image.blend(img1, partner, v)

    return blend_op
155 | + | ||
156 | + | ||
def Identity(img, v):
    """No-op augmentation: return the image unchanged."""
    return img
159 | + | ||
160 | + | ||
def augment_list():  # 16 operations and their magnitude ranges
    """Return the (op, min_magnitude, max_magnitude) triples for RandAugment.

    Ranges follow the TPU EfficientNet AutoAugment reference:
    https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
    """
    return [
        (AutoContrast, 0, 1),
        (Equalize, 0, 1),
        (Invert, 0, 1),
        (Rotate, 0, 30),
        (Posterize, 0, 4),
        (Solarize, 0, 256),
        (SolarizeAdd, 0, 110),
        (Color, 0.1, 1.9),
        (Contrast, 0.1, 1.9),
        (Brightness, 0.1, 1.9),
        (Sharpness, 0.1, 1.9),
        (ShearX, 0., 0.3),
        (ShearY, 0., 0.3),
        (CutoutAbs, 0, 40),
        (TranslateXabs, 0., 100),
        (TranslateYabs, 0., 100),
    ]
204 | + | ||
205 | + | ||
class Lighting(object):
    """AlexNet-style PCA ("fancy") lighting noise for a CHW image tensor."""

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)

    def __call__(self, img):
        # No noise requested: hand the tensor back untouched.
        if self.alphastd == 0:
            return img

        # Per-call random PCA coefficients.
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        scaled = self.eigvec.type_as(img).clone()
        scaled = scaled.mul(alpha.view(1, 3).expand(3, 3))
        scaled = scaled.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = scaled.sum(1).squeeze()

        # Broadcast the per-channel offset across H and W.
        return img.add(rgb.view(3, 1, 1).expand_as(img))
225 | + | ||
226 | + | ||
class CutoutDefault(object):
    """Zero out one random square patch of a CHW tensor.

    Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
    """
    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        h, w = img.size(1), img.size(2)
        # Random patch center; the box is clipped at the borders.
        cy = np.random.randint(h)
        cx = np.random.randint(w)
        half = self.length // 2

        y1, y2 = np.clip(cy - half, 0, h), np.clip(cy + half, 0, h)
        x1, x2 = np.clip(cx - half, 0, w), np.clip(cx + half, 0, w)

        mask = np.ones((h, w), np.float32)
        mask[y1: y2, x1: x2] = 0.
        img *= torch.from_numpy(mask).expand_as(img)
        return img
250 | + | ||
251 | + | ||
class RandAugment:
    """Apply ``n`` randomly chosen ops, all at shared magnitude ``m``."""

    def __init__(self, n, m):
        self.n = n  # number of augmentations applied per image
        self.m = m  # shared magnitude in [0, 30]
        self.augment_list = augment_list()

    def __call__(self, img):
        for op, lo, hi in random.choices(self.augment_list, k=self.n):
            # Map m in [0, 30] linearly onto the op's own [lo, hi] range.
            magnitude = lo + float(hi - lo) * float(self.m) / 30
            img = op(img, magnitude)
        return img
... | \ No newline at end of file | ... | \ No newline at end of file |
configs/All_config.yml
0 → 100644
1 | +task: All | ||
2 | +modelname: MobilenetV3 | ||
3 | +output: output | ||
4 | +checkpoint: "output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar" | ||
5 | +gpu: [2] | ||
6 | +data: | ||
7 | + train: ../data/Fifth_data/All | ||
8 | + val: ../data/Fifth_data/All | ||
9 | + test: ../data/Fifth_data/All | ||
10 | +train: | ||
11 | + epochs: 3000 | ||
12 | + start-epoch: 0 | ||
13 | + batch-size: 256 | ||
14 | + worker: 16 | ||
15 | + resume: '' | ||
16 | + augment: True | ||
17 | + size: 224 | ||
18 | + confidence: False | ||
19 | + weight: [1., 1., 1., 1., 1., 1., 1., 1.] #Crack, Double, Empty, Flip, Leave, Normal, Pollute, Scratch | ||
20 | +predict: | ||
21 | + batch-size: 256 | ||
22 | + worker: 16 | ||
23 | + cam: False | ||
24 | + normalize: True | ||
25 | + save: False | ||
26 | +optimizer: | ||
27 | + type: 'Adam' | ||
28 | + lr: 0.001 | ||
29 | + momentum: 0.9 | ||
30 | + weight_decay: 0.0001 | ||
31 | +loss: | ||
32 | + gamma: 2. | ||
33 | + alpha: 0.8 | ||
34 | +model: | ||
35 | + blocks: 6 | ||
36 | + class: 8 | ||
37 | +etc: | ||
38 | + tensorboard: False | ||
39 | + print_freq: 10 | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
configs/ErrorType_config.yml
0 → 100644
1 | +task: Type | ||
2 | +modelname: MobilenetV3 | ||
3 | +output: output | ||
4 | +checkpoint: "output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar" | ||
5 | +gpu: [1] | ||
6 | +data: | ||
7 | + train: ../data/Fifth_data/ErrorType | ||
8 | + val: ../data/Fifth_data/ErrorType | ||
9 | + test: ../data/Fifth_data/ErrorType | ||
10 | +train: | ||
11 | + epochs: 3000 | ||
12 | + start-epoch: 0 | ||
13 | + batch-size: 256 | ||
14 | + worker: 16 | ||
15 | + resume: '' | ||
16 | + augment: True | ||
17 | + size: 64 | ||
18 | + confidence: False | ||
  weight: [1., 1., 1., 1., 1., 1., 1.] #Crack, Double, Empty, Flip, Leave, Pollute, Scratch
20 | +predict: | ||
21 | + batch-size: 256 | ||
22 | + worker: 16 | ||
23 | + cam: False | ||
24 | + normalize: True | ||
25 | + save: False | ||
26 | +optimizer: | ||
27 | + type: 'SGD' | ||
28 | + lr: 0.1 | ||
29 | + momentum: 0.9 | ||
30 | + weight_decay: 0.0001 | ||
31 | +loss: | ||
32 | + gamma: 2. | ||
33 | + alpha: 0.8 | ||
34 | +model: | ||
35 | + blocks: 4 | ||
36 | + class: 7 | ||
37 | +etc: | ||
38 | + tensorboard: False | ||
39 | + print_freq: 10 | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
configs/Error_config.yml
0 → 100644
1 | +task: Error | ||
2 | +modelname: MobilenetV3 | ||
3 | +output: output | ||
4 | +checkpoint: "output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar" | ||
5 | +gpu: [1] | ||
6 | +data: | ||
7 | + train: ../data/Fifth_data/Error | ||
8 | + val: ../data/Fifth_data/Error | ||
9 | + test: ../data/Fifth_data/Error | ||
10 | +train: | ||
11 | + epochs: 3000 | ||
12 | + start-epoch: 0 | ||
13 | + batch-size: 256 | ||
14 | + worker: 16 | ||
15 | + resume: '' | ||
16 | + augment: True | ||
17 | + size: 64 | ||
18 | + confidence: False | ||
19 | + weight: [1., 1.] #Error , Normal | ||
20 | +predict: | ||
21 | + batch-size: 256 | ||
22 | + worker: 16 | ||
23 | + cam: False | ||
24 | + cam-class: "Error" | ||
25 | + normalize: True | ||
26 | + save: False | ||
27 | +optimizer: | ||
28 | + type: 'SGD' | ||
29 | + lr: 0.1 | ||
30 | + momentum: 0.9 | ||
31 | + weight_decay: 0.0001 | ||
32 | +loss: | ||
33 | + gamma: 2. | ||
34 | + alpha: 0.8 | ||
35 | +model: | ||
36 | + blocks: 4 | ||
37 | + class: 2 | ||
38 | +etc: | ||
39 | + tensorboard: False | ||
40 | + print_freq: 10 | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
encoder_test.py
0 → 100644
1 | +import torch | ||
2 | +import torchvision | ||
3 | +import torch.nn as nn | ||
4 | +import argparse | ||
5 | +from model import AutoEncoder, pytorch_autoencoder | ||
6 | +from get_mean_std import get_params | ||
7 | +from torchvision.utils import save_image | ||
8 | + | ||
9 | + | ||
# CLI: --config selects which autoencoder architecture to load ("my" or default).
parser = argparse.ArgumentParser(description='Process autoencoder')
parser.add_argument('--config', type=str, help='select type')
args = parser.parse_args()

# Evaluation data: a set containing only Scratch-class images.
data_path = "../data/Fourth_data/Auto_test"
checkpoint_path = "./dc_img/checkpoint.pth"
resize_size = 128
batch_size = 128

# Either the hand-built autoencoder (from the report) or the PyTorch-reference
# one; both are pinned to GPU 1.
if args.config == "my":
    model = AutoEncoder().cuda("cuda:1")
else:
    model = pytorch_autoencoder().cuda("cuda:1")

checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint)
print("checkpoint loaded finish!")

# Resize + single-channel tensors, no normalization (matches encoder_train.py).
img_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((resize_size, resize_size)),
    torchvision.transforms.Grayscale(),
    torchvision.transforms.ToTensor(),
])


dataset = torchvision.datasets.ImageFolder(data_path, transform=img_transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False)
criterion = nn.L1Loss()  # reconstruction error (mean absolute difference)

# NOTE(review): model is never switched to eval() mode before inference —
# confirm whether dropout/batchnorm behavior matters for these architectures.
for idx, data in enumerate(dataloader):
    img, _ = data
    img = img.cuda("cuda:1")
    output = model(img)

    # Save the raw reconstruction for visual inspection.
    save_image(output, f'./dc_img/test_output_{idx}.png')

    loss = criterion(output, img)

    # Residual image: input minus reconstruction highlights defect regions.
    img = img - output

    save_image(img, f'./dc_img/scratch_dif_{idx}.png')

# NOTE(review): this is only the LAST batch's loss (and raises NameError when
# the dataset is empty) — confirm that reporting one batch is intended.
print(f"loss : {loss}")
... | \ No newline at end of file | ... | \ No newline at end of file |
encoder_train.py
0 → 100644
1 | +import torch | ||
2 | +import torchvision | ||
3 | +import torch.nn as nn | ||
4 | +import argparse | ||
5 | +from model import AutoEncoder, pytorch_autoencoder, AutoEncoder_s | ||
6 | +from get_mean_std import get_params | ||
7 | +from torchvision.utils import save_image | ||
8 | + | ||
# CLI: --config selects the autoencoder architecture ("my", "pytorch", or small).
parser = argparse.ArgumentParser(description='Process autoencoder')
parser.add_argument('--config', type=str, help='select type')
args = parser.parse_args()

# Training data: a set containing only Normal-class images.
data_path = "../data/Fourth_data/Auto"
resize_size = 128
num_epochs = 100
batch_size = 128
learning_rate = 1e-3

# The hand-built autoencoder (from the report), the PyTorch-reference one,
# or the small variant. All are pinned to GPU 1.
if args.config == "my":
    model = AutoEncoder().cuda("cuda:1")
elif args.config == "pytorch":
    model = pytorch_autoencoder().cuda("cuda:1")
else:
    model = AutoEncoder_s().cuda("cuda:1")

print(model)
#mean, std = get_params(data_path, resize_size)

# Resize + single-channel tensors, no normalization.
img_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((resize_size, resize_size)),
    torchvision.transforms.Grayscale(),
    torchvision.transforms.ToTensor(),
])

dataset = torchvision.datasets.ImageFolder(data_path, transform=img_transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)

criterion = nn.L1Loss()  # pixel-wise reconstruction error
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)

for epoch in range(num_epochs):
    for data in dataloader:
        img, _ = data
        img = img.cuda("cuda:1")
        output = model(img)
        loss = criterion(output, img)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Logs the loss of the final batch of the epoch only.
    print('epoch [{}/{}], loss:{:.4f}'.format(epoch+1, num_epochs, loss.item()))

    # Periodically dump the latest reconstructions for visual inspection.
    if epoch % 10 ==0:
        save_image(output, './dc_img/image_{}.png'.format(epoch))

torch.save(model.state_dict(), './dc_img/checkpoint.pth')
... | \ No newline at end of file | ... | \ No newline at end of file |
finetune.py
0 → 100644
1 | +import torch | ||
2 | +import torch.nn as nn | ||
3 | +import os | ||
4 | +import shutil | ||
5 | +import logging | ||
6 | +from model import mobilenetv3 | ||
7 | +from utils import get_args_from_yaml | ||
8 | +import torchvision.datasets as datasets | ||
9 | +from utils import AverageMeter, accuracy, printlog, precision, recall | ||
10 | +import torchvision.transforms as transforms | ||
11 | +from torch.utils.data.sampler import SubsetRandomSampler | ||
12 | +import numpy as np | ||
13 | +import time | ||
14 | +from get_mean_std import get_params | ||
15 | + | ||
# Fine-tuning setup: load a MobilenetV3 checkpoint trained on 8 classes and
# adapt it to a 7-class dataset (the classifier head is re-initialized below).
model = mobilenetv3(n_class=7, blocknum=6, dropout=0.5)
model = model.train()
data_path = "../data/All"
check_path = "output/All/30114_model=MobilenetV3-ep=3000-block=6-class=8/model_best.pth.tar"
validation_ratio = 0.1  # fraction of the dataset held out for validation
random_seed = 10        # fixed so train/val split is reproducible
gpus=[0]
epochs = 3000
resize_size=128

# Root logger writes to both stdout and logs/finetune.log.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
streamHandler = logging.StreamHandler()
logger.addHandler(streamHandler)

fileHandler = logging.FileHandler("logs/finetune.log")
logger.addHandler(fileHandler)
33 | + | ||
34 | + | ||
def save_checkpoint(state, is_best, block=6, filename='checkpoint.pth.tar'):
    """Save *state* under output/All/ and, when *is_best*, copy it to
    model_best.pth.tar.

    ``block`` is unused but kept for interface compatibility with callers.
    Returns the path of the best-model file so callers can reload it later.
    """
    directory = os.path.join('output', 'All')
    # exist_ok avoids a race between the existence check and makedirs.
    os.makedirs(directory, exist_ok=True)
    filename = os.path.join(directory, filename)
    torch.save(state, filename)
    # BUG FIX: the original logged the literal text "(unknown)" instead of
    # the path that was actually written.
    logger.info(f"Checkpoint Saved: {filename}")
    best_filename = os.path.join(directory, "model_best.pth.tar")
    if is_best:
        shutil.copyfile(filename, best_filename)
        logger.info(f"New Best Checkpoint saved: {best_filename}")

    return best_filename
49 | + | ||
def validate(val_loader, model, criterion, epoch, q=None):
    """Perform one evaluation pass over the validation set.

    Returns (average top-1 accuracy, list of per-class precision meters,
    list of per-class recall meters).  ``q``, when given, is forwarded to
    ``printlog`` — presumably a queue feeding the UI; confirm with caller.
    """
    with torch.no_grad():
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        prec = []
        rec = []

        # One precision/recall meter per class (7 classes hard-coded,
        # matching the n_class=7 model above).
        for i in range(7):
            prec.append(AverageMeter())
            rec.append(AverageMeter())
        # switch to evaluate mode
        model.eval()
        end = time.time()

        for i, (input, target) in enumerate(val_loader):
            if torch.cuda.is_available():
                target = target.cuda()
                input = input.cuda()

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1 = accuracy(output.data, target, topk=(1,))[0]

            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))

            # accumulate per-class precision/recall
            for k in range(7):
                prec[k].update(precision(output.data, target, target_class=k), input.size(0))
                rec[k].update(recall(output.data, target, target_class=k), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % 10 == 0:
                logger.info('Test: [{0}/{1}]\t'
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'
                         .format(
                             i, len(val_loader), batch_time=batch_time, loss=losses,
                             top1=top1))

        printlog(' * epoch: {epoch} Prec@1 {top1.avg:.3f}'.format(epoch=epoch,top1=top1), logger, q)

        return top1.avg, prec, rec
101 | + | ||
102 | + | ||
def train(model, train_loader, criterion, optimizer, epoch):
    """Run one training epoch, logging progress every 10 batches."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    prec = AverageMeter()  # kept for parity with validate(); not updated here

    # switch to train mode
    model.train()
    tic = time.time()

    for step, (input, target) in enumerate(train_loader):
        if torch.cuda.is_available():
            target = target.cuda()
            input = input.cuda()

        # forward pass and loss
        output = model(input)
        loss = criterion(output, target)

        # bookkeeping: loss and top-1 accuracy, weighted by batch size
        prec1 = accuracy(output, target, topk=(1,))[0]
        n = input.size(0)
        losses.update(loss.item(), n)
        top1.update(prec1.item(), n)

        # backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # timing
        batch_time.update(time.time() - tic)
        tic = time.time()

        if step % 10 == 0:
            logger.info('Epoch: [{0}][{1}/{2}]\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                        .format(
                            epoch, step, len(train_loader), batch_time=batch_time,
                            loss=losses, top1=top1))
143 | + | ||
# Freeze everything up to module index 62 (the feature extractor); leave the
# rest (the classifier head) trainable.  NOTE(review): 62 is tied to the
# blocknum=6 architecture above — confirm if the model definition changes.
for idx, (name, module) in enumerate(model.named_modules()):
    if(idx < 62):
        for param in module.parameters():
            param.requires_grad = False
    else:
        for param in module.parameters():
            param.requires_grad = True

# Dataset-wide grayscale mean/std for input normalization.
mean, std = get_params(data_path, resize_size)
normalize = transforms.Normalize(mean=[mean[0].item()],
                                 std=[std[0].item()])

transform_train = transforms.Compose([
    transforms.Resize((resize_size, resize_size)),  # resize to a fixed square
    transforms.ColorJitter(0.2,0.2,0.2),  # jitter brightness, contrast, saturation
    transforms.RandomRotation(2),  # rotate by up to +/-2 degrees
    transforms.RandomAffine(5),  # small random affine distortion
    transforms.RandomCrop(resize_size, padding=2),  # pad by 2 on each side, crop back
    transforms.RandomHorizontalFlip(),  # random left-right flip
    transforms.Grayscale(),
    transforms.ToTensor(),
    normalize
    ])

transform_test = transforms.Compose([
    transforms.Resize((resize_size, resize_size)),
    transforms.Grayscale(),
    transforms.ToTensor(),
    normalize
])

kwargs = {'num_workers': 16, 'pin_memory': True}

# Same directory for both; only the transforms differ. The samplers below
# keep the two index sets disjoint.
train_data = datasets.ImageFolder(data_path, transform_train)
val_data = datasets.ImageFolder(data_path,transform_test)


num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor(validation_ratio * num_train))

# Fix the random seed (10 for both train and test runs, so the same split
# is reproduced every time).
np.random.seed(random_seed)
np.random.shuffle(indices)

# Split into train / validation index sets.
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)

train_loader = torch.utils.data.DataLoader(
    train_data, batch_size=256, sampler=train_sampler, #shuffle = True
    **kwargs)
val_loader = torch.utils.data.DataLoader(
    val_data, batch_size=256, sampler=valid_sampler, #shuffle = False
    **kwargs)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), 0.0001, weight_decay=0.0001)

if torch.cuda.is_available():
    torch.cuda.set_device(gpus[0])
    with torch.cuda.device(gpus[0]):
        model = model.cuda()
        criterion = criterion.cuda()
    model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])

checkpoint = torch.load(check_path)

# Copy all pretrained weights except the classifier head (the class count
# changed from 8 to 7, so its shape no longer matches).
pretrained_dict = checkpoint['state_dict']
new_model_dict = model.state_dict()
for k, v in pretrained_dict.items():
    if 'classifier' in k:
        continue
    new_model_dict.update({k : v})
model.load_state_dict(new_model_dict)

#model.load_state_dict(checkpoint['state_dict'], strict=False)
# NOTE(review): best_prec1 comes from the 8-class run — early fine-tune
# epochs may never be saved as "best"; confirm this is intended.
best_prec1 = checkpoint['best_prec1']

for epoch in range(epochs):
    train(model, train_loader, criterion, optimizer, epoch)

    prec1, prec, rec = validate(val_loader, model, criterion, epoch)

    is_best = prec1 >= best_prec1

    best_prec1 = max(prec1, best_prec1)

    checkpoint = save_checkpoint({
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'best_prec1': best_prec1,
    }, is_best)


# NOTE(review): these meters are from the LAST validation epoch only.
for i in range(len(prec)):
    logger.info(' * Precision {prec.avg:.3f}'.format(prec=prec[i]))
    logger.info(' * recall {rec.avg:.3f}'.format(rec=rec[i]))
... | \ No newline at end of file | ... | \ No newline at end of file |
focal_loss.py
0 → 100644
1 | +import torch | ||
2 | +import torch.nn as nn | ||
3 | +import torch.nn.functional as F | ||
4 | +from torch.autograd import Variable | ||
5 | + | ||
class FocalLoss(nn.Module):
    """Focal loss (Lin et al., 2017) for multi-class classification.

    Args:
        gamma: focusing parameter; 0 reduces this to (weighted) NLL loss.
        alpha: per-class weight. A float/int ``a`` becomes [a, 1-a]; a list
            is used as-is; ``None`` disables class weighting.
        size_average: return the mean of per-sample losses if True, else sum.
    """
    def __init__(self, gamma=0, alpha=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        if isinstance(alpha,(float,int)): self.alpha = torch.Tensor([alpha,1-alpha])
        if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)
        self.size_average = size_average

    def forward(self, input, target):
        if input.dim() > 2:
            input = input.view(input.size(0),input.size(1),-1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1,2)    # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1,input.size(2))   # N,H*W,C => N*H*W,C
        target = target.view(-1,1)

        # log p_t of the target class for every sample. Explicit dim=1 is
        # correct for the (N, C) layout and silences the deprecation warning.
        logpt = F.log_softmax(input, dim=1)
        logpt = logpt.gather(1,target)
        logpt = logpt.view(-1)
        # detach: the focal modulation (1-pt)^gamma is treated as a constant
        # weight, matching the original Variable(logpt.data.exp()).
        pt = logpt.detach().exp()

        # BUG FIX: the original called ``self.alpha.cuda()`` BEFORE the None
        # check, crashing with AttributeError when alpha was unset (and on
        # CPU-only machines). type_as below also moves alpha to input's device.
        if self.alpha is not None:
            if self.alpha.type()!=input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0,target.data.view(-1))
            logpt = logpt * at

        loss = -1 * (1-pt)**self.gamma * logpt

        if self.size_average: return loss.mean()
        else: return loss.sum()
... | \ No newline at end of file | ... | \ No newline at end of file |
get_confidence.py
0 → 100644
1 | +import torch.multiprocessing as mp | ||
2 | +import torch | ||
3 | +import torchvision.datasets as datasets | ||
4 | +import torchvision.transforms as transforms | ||
5 | +import argparse | ||
6 | +import numpy as np | ||
7 | +from get_mean_std import get_params | ||
8 | +from model import mobilenetv3 | ||
9 | +import parmap | ||
10 | + | ||
11 | + | ||
# Image resize size used by the loader below.
resize_size = 64

# Number of classes in the ErrorType task.
class_num = 7

# Random seeds used to train each checkpoint (one per entry in `checkpoints`).
seeds = [39396, 2798, 3843, 62034, 8817, 65014, 45385]

# Number of GPUs on this machine; models are assigned round-robin.
gpu = 4

# Saved checkpoints, one per seed above (same order).
checkpoints = [
    "output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/2798_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/3843_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar"
]
34 | + | ||
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count, and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
52 | + | ||
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k (as a percentage) for each k in ``topk``."""
    maxk = max(topk)
    batch_size = target.size(0)

    # Top-k predicted class indices, transposed to (k, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    hits = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        n_correct = hits[:k].reshape(-1).float().sum(0)
        res.append(n_correct.mul_(100.0 / batch_size))
    return res
66 | + | ||
def get_models():
    """Load every checkpoint into its own model, round-robin across GPUs.

    Each model is wrapped in a single-device DataParallel and has its
    weights moved to shared memory (``share_memory``) so it can be handed
    to a spawned subprocess.
    """
    models=[]
    for idx, checkpoint in enumerate(checkpoints):
        gpu_idx = idx % gpu  # round-robin GPU assignment

        weights = torch.load(checkpoint)
        model = mobilenetv3(n_class=class_num)

        torch.cuda.set_device(gpu_idx)
        with torch.cuda.device(gpu_idx):
            model = model.cuda()

        # DataParallel wrapper matches the 'module.'-prefixed checkpoint keys.
        model = torch.nn.DataParallel(model, device_ids=[gpu_idx], output_device=gpu_idx)
        model.load_state_dict(weights['state_dict'])

        model.share_memory()
        models.append(model)
    return models
85 | + | ||
def get_loader(path, resize_size):
    """Build a DataLoader over the grayscale, normalized images under *path*.

    Normalization statistics are computed from the dataset itself via
    ``get_params``.  Returns a non-shuffling loader with batch size 256.
    """
    mean, std = get_params(path, resize_size)
    normalize = transforms.Normalize(mean=[mean[0].item()],
                                     std=[std[0].item()])

    transform = transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        normalize
    ])
    # BUG FIX: the original read the module-level ``args.path`` instead of
    # the ``path`` parameter, silently ignoring the argument (and raising
    # NameError when imported without the CLI entry point).
    dataset = datasets.ImageFolder(path, transform)
    kwargs = {'num_workers': 4, 'pin_memory': True}

    loader = torch.utils.data.DataLoader(dataset, batch_size=256, shuffle=False, **kwargs)

    return loader
103 | + | ||
def get_data(processnum ,model, loader, return_dict):
    """Evaluate *model* on *loader*; store its top-1 accuracy in return_dict.

    Runs on GPU ``processnum % gpu`` — the same device the model was
    assigned in get_models().
    """
    with torch.no_grad():
        top1 = AverageMeter()
        model.eval()
        device = processnum % gpu
        for batch in loader:
            input, target = batch

            target = target.cuda(device)
            input = input.cuda(device)

            output = model(input)

            prec1 = accuracy(output, target, topk=(1,))[0]
            top1.update(prec1.item(), input.size(0))

        return_dict[processnum] = top1.avg
122 | + | ||
123 | + | ||
124 | + | ||
if __name__ == '__main__':
    # 'spawn' is required when CUDA models are shared with subprocesses.
    mp.set_start_method('spawn')
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", required=True, help="path")
    args = parser.parse_args()

    manager = mp.Manager()
    return_dict = manager.dict()  # maps process index -> top-1 accuracy


    # get one loader
    loader = get_loader(args.path, resize_size)

    # multi model with other checkpoint.
    models = get_models()

    #loader is not array so can arise error
    # One evaluation process per checkpointed model.
    processes = []
    for i, model in enumerate(models):
        p = mp.Process(target=get_data, args=(i, model, loader, return_dict))
        p.start()
        processes.append(p)

    for p in processes: p.join()

    # Report each seed's accuracy and the across-seed variance (stability).
    for idx, seed in enumerate(seeds):
        print(f"process {idx}, seed {seed} : {return_dict[idx]}")

    print(f"total variance : {np.var(return_dict.values())}")
    #print(return_dict.values())
... | \ No newline at end of file | ... | \ No newline at end of file |
get_mean_std.py
0 → 100644
1 | +import os | ||
2 | +import numpy as np | ||
3 | +import torch | ||
4 | +import torch.backends.cudnn as cudnn | ||
5 | +import torch.nn as nn | ||
6 | +import torchvision.models as models | ||
7 | +import torchvision.datasets as datasets | ||
8 | +import torchvision.transforms as transforms | ||
9 | +from torchvision.utils import save_image | ||
10 | +from PIL.ImageOps import grayscale | ||
11 | +from PIL import Image | ||
12 | +from torchvision.datasets import ImageFolder | ||
13 | + | ||
class MyDataset(ImageFolder):
    """Thin ImageFolder wrapper kept as an extension point.

    NOTE(review): the ``trainsform`` parameter name is a typo for
    ``transform``; it is kept as-is to avoid breaking keyword callers.
    """

    def __init__(self, root, trainsform):
        super(MyDataset, self).__init__(root, trainsform)

    def __getitem__(self, index):
        # Defer entirely to ImageFolder; yields an (image, label) pair.
        image, label = super(MyDataset, self).__getitem__(index)
        return image, label
21 | + | ||
22 | + | ||
23 | + | ||
def get_params(path, resize_size):
    """Compute per-channel mean/std of the grayscale dataset under *path*.

    Statistics are the average of per-image means and stds — a common
    approximation rather than the exact dataset-wide deviation.
    """
    pipeline = transforms.Compose([
        transforms.Resize((resize_size,resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor()
    ])

    loader = torch.utils.data.DataLoader(
        MyDataset(path, pipeline),
        batch_size=256,
        num_workers=8,
        shuffle=False
    )

    mean = 0.
    std = 0.
    nb_samples = 0.
    for data, _ in loader:
        count = data.size(0)
        # Flatten H x W so mean/std reduce over pixels per channel.
        flat = data.view(count, data.size(1), -1)
        mean += flat.mean(2).sum(0)
        std += flat.std(2).sum(0)
        nb_samples += count

    mean /= nb_samples
    std /= nb_samples
    print(f"mean : {mean} , std : {std}")
    return mean, std
54 | + | ||
55 | +""" | ||
56 | +my_transform = transforms.Compose([ | ||
57 | +transforms.Resize((64,64)), | ||
58 | +transforms.ToTensor() | ||
59 | +]) | ||
60 | + | ||
61 | +my_dataset = MyDataset("../data/Third_data/not_binary", my_transform) | ||
62 | + | ||
63 | +loader = torch.utils.data.DataLoader( | ||
64 | + my_dataset, | ||
65 | + batch_size=256, | ||
66 | + num_workers=8, | ||
67 | + shuffle=False | ||
68 | +) | ||
69 | + | ||
70 | +mean = 0. | ||
71 | +std = 0. | ||
72 | +nb_samples = 0. | ||
73 | +for i, (data, target) in enumerate(loader): | ||
74 | + batch_samples = data.size(0) | ||
75 | + data = data.view(batch_samples, data.size(1), -1) | ||
76 | + mean += data.mean(2).sum(0) | ||
77 | + std += data.std(2).sum(0) | ||
78 | + nb_samples += batch_samples | ||
79 | + | ||
80 | +mean /= nb_samples | ||
81 | +std /= nb_samples | ||
82 | + | ||
83 | +print(f"mean : {mean}, std : {std}") | ||
84 | +""" | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
get_threshold.py
0 → 100644
1 | +import os | ||
2 | +import time | ||
3 | +import sys | ||
4 | +import torch.nn.functional as F | ||
5 | + | ||
6 | +import numpy as np | ||
7 | +import PIL | ||
8 | +import torch | ||
9 | +import torch.backends.cudnn as cudnn | ||
10 | +import torch.nn as nn | ||
11 | +import torch.nn.parallel | ||
12 | +import torch.utils.data | ||
13 | +import torchvision.datasets as datasets | ||
14 | +import torchvision.transforms as transforms | ||
15 | +import yaml | ||
16 | +import cv2 | ||
17 | +from get_mean_std import get_params | ||
# Make sibling modules (e.g. ``model``) importable regardless of the CWD.
# BUG FIX: the original used __name__, whose dirname is always '' — the
# intent is clearly the directory containing this script (__file__).
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
19 | +from model import mobilenetv3 | ||
20 | + | ||
21 | +if not os.path.exists("threshold"): | ||
22 | + os.mkdir("threshold") | ||
23 | + | ||
24 | +thresholds = [.05, .1, .15, .2, .25, .3, .35, .4, .45, .5] | ||
25 | + | ||
26 | +for threshold in thresholds: | ||
27 | + if not os.path.exists(f"threshold/{threshold}"): | ||
28 | + os.mkdir(f"threshold/{threshold}") | ||
29 | + | ||
30 | + | ||
def get_args_from_yaml(file='trainer/configs/Error_config.yml'):
    """Load a training configuration dict from a YAML file.

    Args:
        file: path to the YAML config file.

    Returns:
        The parsed configuration (normally a dict).
    """
    with open(file) as f:
        # safe_load: yaml.load without an explicit Loader can construct
        # arbitrary Python objects and is deprecated since PyYAML 5.1.
        return yaml.safe_load(f)
35 | + | ||
class MyImageFolder(datasets.ImageFolder):
    """ImageFolder variant that also yields the sample's file path.

    Each item is ``((image, target), (path, class_index))`` so a prediction
    can be traced back to its source file.
    """

    def __getitem__(self, index):
        item = super(MyImageFolder, self).__getitem__(index)
        return item, self.imgs[index]
40 | + | ||
def main(args):
    """Entry point: run the model pass described by ``args`` and report."""
    run_model(args)
    task_id = args['id']
    print(f"[{task_id}] done")
44 | + | ||
def run_model(args):
    """Build the validation loader, restore the checkpoint and extract data.

    Args:
        args: configuration dict loaded from YAML; reads the ``train``,
            ``predict``, ``data``, ``model``, ``gpu`` and ``checkpoint``
            entries.
    """
    resize_size = args['train']['size']

    gpus = args['gpu']

    # Statistics are recomputed from the *training* split so normalization
    # matches what the checkpoint was trained with.
    mean, std = get_params(args['data']['train'], resize_size)

    # Input is grayscale, so only channel 0 of mean/std is used.
    normalize = transforms.Normalize(mean=[mean[0].item()],
                                     std=[std[0].item()])

    normalize_factor = [mean, std]

    # data loader
    transform_test = transforms.Compose([
        transforms.Resize((resize_size,resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        normalize
    ])
    kwargs = {'num_workers': args['predict']['worker'], 'pin_memory': True}
    test_data = MyImageFolder(args['data']['val'], transform_test)
    val_loader = torch.utils.data.DataLoader(
        test_data, batch_size=args['predict']['batch-size'], shuffle=False,
        **kwargs)

    # load model
    model = mobilenetv3(n_class= args['model']['class'], blocknum= args['model']['blocks'])

    torch.cuda.set_device(gpus[0])
    with torch.cuda.device(gpus[0]):
        model = model.cuda()

    # NOTE(review): DataParallel is used even for a single GPU — presumably
    # so checkpoint keys keep their ``module.`` prefix; confirm before
    # simplifying.
    model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])

    print("=> loading checkpoint '{}'".format(args['checkpoint']))
    checkpoint = torch.load(args['checkpoint'])
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(args['checkpoint'], checkpoint['epoch']))
    cudnn.benchmark = True

    extract_data(val_loader, model, normalize_factor, args)
87 | + | ||
88 | + | ||
def extract_data(val_loader, model, normalize_factor, args):
    """Run the model over the validation loader and persist each batch.

    ``normalize_factor`` and ``args`` are accepted for interface
    compatibility; they are not used here.
    """
    with torch.no_grad():
        # Evaluation mode: disable dropout, use running batchnorm stats.
        model.eval()
        for (input, target), (path, _) in val_loader:
            input = input.cuda()
            target = target.cuda()

            output = model(input)

            print("save data!")
            save_data(output, target, path)
102 | + | ||
class AverageMeter(object):
    """Tracks the most recent value and a running weighted average."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Clear all statistics back to zero.
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        # ``n`` is the weight of this observation (e.g. batch size).
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
116 | + | ||
117 | + | ||
118 | +def accuracy(output, target, topk=(1,)): | ||
119 | + """Computes the precision@k for the specified values of k""" | ||
120 | + maxk = max(topk) | ||
121 | + batch_size = target.size(0) | ||
122 | + _, pred = output.topk(maxk, 1, True, True) | ||
123 | + pred = pred.t() | ||
124 | + correct = pred.eq(target.view(1, -1).expand_as(pred)) | ||
125 | + res = [] | ||
126 | + for k in topk: | ||
127 | + correct_k = correct[:k].view(-1).float().sum(0) | ||
128 | + res.append(correct_k.mul_(100.0 / batch_size)) | ||
129 | + return res | ||
130 | + | ||
def save_data(output, target, path):
    """Copy ambiguous images into per-threshold folders.

    For every sample, the gap between the two highest softmax probabilities
    is compared against each value in the module-level ``thresholds``; when
    the gap is below a threshold, the source image is written into
    ``threshold/<t>/`` named after its predicted top-2 classes and its
    original filename.

    Args:
        output: ``(batch, num_classes)`` raw model outputs (logits).
        target: ``(batch,)`` ground-truth labels (currently unused; kept for
            interface compatibility with callers).
        path: sequence of source image paths aligned with ``output`` rows.
    """
    n_digits = 3
    prob = F.softmax(output, dim=1)
    # Round to 3 decimals so gap comparisons match the displayed precision.
    prob = torch.round(prob * 10**n_digits) / (10**n_digits)
    for idx, p in enumerate(prob):
        top2 = torch.topk(p, 2)
        value = top2.values.tolist()
        indice = top2.indices.tolist()

        gap = abs(value[0] - value[1])
        filename = path[idx].split('/')[-1]
        for threshold in thresholds:
            if gap < threshold:
                img = cv2.imread(path[idx])
                # BUG FIX: ``filename`` was computed but never used, so every
                # image with the same top-2 class pair overwrote the previous
                # one; embed the source filename to keep outputs unique.
                cv2.imwrite(f'threshold/{threshold}/pred_{indice[0]}_{indice[1]}_{filename}', img)
148 | + | ||
if __name__ == '__main__':
    # Standalone entry point: load the "All" model configuration and run the
    # threshold-extraction pass over its validation split.
    args = get_args_from_yaml('configs/All_config.yml')
    args['config'] = 'All'
    args['id'] = 'threshold'
    main(args)
... | \ No newline at end of file | ... | \ No newline at end of file |
logs/All/16166_MobilenetV3_block_6.log
0 → 100644
This diff could not be displayed because it is too large.
1 | +2020-03-31-19-11-26 | ||
2 | +use seed 963 | ||
3 | +use dataset : ../data/Fourth_data/All | ||
4 | +{'task': 'All/train_2020-03-31-19-11-26_model=MobilenetV3-ep=3000-block=6', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': '../data/Fourth_data/All', 'val': '../data/Fourth_data/All', 'test': '../data/Fourth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'weight': [2.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0], 'resume': '', 'augment': True, 'size': 224, 'confidence': False}, 'predict': {'batch-size': 256, 'worker': 64, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-31-19-11-26'} |
1 | +2020-04-03-17-46-05 | ||
2 | +use seed 635 | ||
3 | +use dataset : E:/code/detection/data/Fifth_data/All | ||
4 | +{'task': 'All/train_2020-04-03-17-46-05_model=MobilenetV3-ep=4000-block=6-class=8', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/All', 'test': '../data/Fifth_data/All'}, 'train': {'epochs': 4000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar', 'augment': True, 'size': 224, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 8}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-46-05'} | ||
5 | +Number of model parameters: 462840 | ||
6 | +=> loading checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' | ||
7 | +=> loaded checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' (epoch 3000) | ||
8 | +Epoch: [0][0/26] Time 113.958 (113.958) Loss 0.0051 (0.0051) Prec@1 100.000 (100.000) |
1 | +2020-04-08-19-38-36 | ||
2 | +use seed 283 | ||
3 | +use dataset : E:/code/detection/data/Fifth_data/All | ||
4 | +{'task': 'All/train_2020-04-08-19-38-36_model=MobilenetV3-ep=3000-block=6-class=8', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/All', 'test': '../data/Fifth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 224, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 8}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-08-19-38-36'} | ||
5 | +Fatal error in main loop | ||
6 | +Traceback (most recent call last): | ||
7 | + File "E:\code\detection\trainer\train.py", line 104, in main | ||
8 | + run_model(args, q) | ||
9 | + File "E:\code\detection\trainer\train.py", line 221, in run_model | ||
10 | + scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader)) | ||
11 | +TypeError: object of type 'DataLoader' has no len() | ||
12 | +[train_2020-04-08-19-38-36] failed |
logs/Error/12613_MobilenetV3_block_4.log
0 → 100644
This diff could not be displayed because it is too large.
1 | +2020-03-31-18-30-33 | ||
2 | +use seed 626 | ||
3 | +use dataset : ../data/Fourth_data/Error | ||
4 | +{'task': 'Error/train_2020-03-31-18-30-33_model=MobilenetV3-ep=3000-block=4', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [0], 'data': {'train': '../data/Fourth_data/Error', 'val': '../data/Fourth_data/Error', 'test': '../data/Fourth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 2.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-31-18-30-33'} | ||
5 | +Number of model parameters: 154706 | ||
6 | +Fatal error in main loop | ||
7 | +Traceback (most recent call last): | ||
8 | + File "E:\code\detection\trainer\train.py", line 91, in main | ||
9 | + run_model(args, q) | ||
10 | + File "E:\code\detection\trainer\train.py", line 264, in run_model | ||
11 | + train(train_loader, model, criterion, optimizer, scheduler, epoch, args, q) | ||
12 | + File "E:\code\detection\trainer\train.py", line 309, in train | ||
13 | + for i, (input, target) in enumerate(train_loader): | ||
14 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 279, in __iter__ | ||
15 | + return _MultiProcessingDataLoaderIter(self) | ||
16 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 719, in __init__ | ||
17 | + w.start() | ||
18 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 105, in start | ||
19 | + self._popen = self._Popen(self) | ||
20 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen | ||
21 | + return _default_context.get_context().Process._Popen(process_obj) | ||
22 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen | ||
23 | + return Popen(process_obj) | ||
24 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__ | ||
25 | + reduction.dump(process_obj, to_child) | ||
26 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump | ||
27 | + ForkingPickler(file, protocol).dump(obj) | ||
28 | +BrokenPipeError: [Errno 32] Broken pipe | ||
29 | +[train_2020-03-31-18-30-33] failed |
1 | +2020-04-01-17-53-24 | ||
2 | +use seed 420 | ||
3 | +use dataset : ../data/Fifth_data/Error | ||
4 | +{'task': 'Error/train_2020-04-01-17-53-24_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-17-53-24'} | ||
5 | +Fatal error in main loop | ||
6 | +Traceback (most recent call last): | ||
7 | + File "E:\code\detection\trainer\train.py", line 91, in main | ||
8 | + run_model(args, q) | ||
9 | + File "E:\code\detection\trainer\train.py", line 125, in run_model | ||
10 | + mean, std = get_params(args['data']['train'], resize_size) | ||
11 | + File "E:\code\detection\trainer\get_mean_std.py", line 31, in get_params | ||
12 | + my_dataset = MyDataset(path, my_transform) | ||
13 | + File "E:\code\detection\trainer\get_mean_std.py", line 16, in __init__ | ||
14 | + super(MyDataset, self).__init__(root, trainsform) | ||
15 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torchvision\datasets\folder.py", line 209, in __init__ | ||
16 | + target_transform=target_transform) | ||
17 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torchvision\datasets\folder.py", line 83, in __init__ | ||
18 | + classes, class_to_idx = self._find_classes(root) | ||
19 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torchvision\datasets\folder.py", line 116, in _find_classes | ||
20 | + classes = [d.name for d in os.scandir(dir) if d.is_dir()] | ||
21 | +FileNotFoundError: [WinError 3] 지정된 경로를 찾을 수 없습니다: '../data/Fifth_data/Error' | ||
22 | +[train_2020-04-01-17-53-24] failed |
1 | +2020-04-01-18-16-36 | ||
2 | +use seed 95 | ||
3 | +use dataset : ../data/Fifth_data/Error | ||
4 | +{'task': 'Error/train_2020-04-01-18-16-36_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-18-16-36'} | ||
5 | +Number of model parameters: 154706 | ||
6 | +Epoch: [0][0/7] Time 43.018 (43.018) Loss 0.6986 (0.6986) Prec@1 34.473 (34.473) Precision 0.000 (0.000) |
1 | +2020-04-01-20-18-29 | ||
2 | +use seed 997 | ||
3 | +use dataset : ../data/Fifth_data/Error | ||
4 | +{'task': 'Error/train_2020-04-01-20-18-29_model=MobilenetV3-ep=3000-block=6-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-20-18-29'} | ||
5 | +Fatal error in main loop | ||
6 | +Traceback (most recent call last): | ||
7 | + File "E:\code\detection\trainer\train.py", line 93, in main | ||
8 | + run_model(args, q) | ||
9 | + File "E:\code\detection\trainer\train.py", line 127, in run_model | ||
10 | + mean, std = get_params(args['data']['train'], resize_size) | ||
11 | + File "E:\code\detection\trainer\get_mean_std.py", line 43, in get_params | ||
12 | + for i, (data, target) in enumerate(loader): | ||
13 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 279, in __iter__ | ||
14 | + return _MultiProcessingDataLoaderIter(self) | ||
15 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 719, in __init__ | ||
16 | + w.start() | ||
17 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 105, in start | ||
18 | + self._popen = self._Popen(self) | ||
19 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen | ||
20 | + return _default_context.get_context().Process._Popen(process_obj) | ||
21 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen | ||
22 | + return Popen(process_obj) | ||
23 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__ | ||
24 | + reduction.dump(process_obj, to_child) | ||
25 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump | ||
26 | + ForkingPickler(file, protocol).dump(obj) | ||
27 | +BrokenPipeError: [Errno 32] Broken pipe | ||
28 | +[train_2020-04-01-20-18-29] failed |
1 | +2020-04-01-21-07-25 | ||
2 | +use seed 880 | ||
3 | +use dataset : ../data/Fifth_data/Error | ||
4 | +{'task': 'Error/train_2020-04-01-21-07-25_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-21-07-25'} |
1 | +2020-04-01-22-40-24 | ||
2 | +use seed 238 | ||
3 | +use dataset : ../data/Fifth_data/Error | ||
4 | +{'task': 'Error/train_2020-04-01-22-40-24_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-22-40-24'} | ||
5 | +Number of model parameters: 154706 | ||
6 | +Epoch: [0][0/7] Time 44.533 (44.533) Loss 0.6956 (0.6956) Prec@1 41.504 (41.504) Precision 0.000 (0.000) |
1 | +2020-04-01-23-15-24 | ||
2 | +use seed 666 | ||
3 | +use dataset : ../data/Fifth_data/Error | ||
4 | +{'task': 'Error/train_2020-04-01-23-15-24_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-23-15-24'} |
1 | +2020-04-03-17-02-42 | ||
2 | +use seed 185 | ||
3 | +use dataset : E:/code/detection/data/Fifth_data/All | ||
4 | +{'task': 'Error/train_2020-04-03-17-02-42_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': '3000', 'start-epoch': 0, 'batch-size': '256', 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': '256', 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': '0.001', 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-02-42'} | ||
5 | +Fatal error in main loop | ||
6 | +Traceback (most recent call last): | ||
7 | + File "E:\code\detection\trainer\train.py", line 102, in main | ||
8 | + run_model(args, q) | ||
9 | + File "E:\code\detection\trainer\train.py", line 195, in run_model | ||
10 | + **kwargs) | ||
11 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 219, in __init__ | ||
12 | + batch_sampler = BatchSampler(sampler, batch_size, drop_last) | ||
13 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\sampler.py", line 190, in __init__ | ||
14 | + "but got batch_size={}".format(batch_size)) | ||
15 | +ValueError: batch_size should be a positive integer value, but got batch_size=256 | ||
16 | +[train_2020-04-03-17-02-42] failed |
1 | +2020-04-03-17-04-30 | ||
2 | +use seed 54 | ||
3 | +use dataset : E:/code/detection/data/Fifth_data/All | ||
4 | +{'task': 'Error/train_2020-04-03-17-04-30_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': '3000', 'start-epoch': 0, 'batch-size': '256', 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': '256', 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': '0.001', 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-04-30'} | ||
5 | +Fatal error in main loop | ||
6 | +Traceback (most recent call last): | ||
7 | + File "E:\code\detection\trainer\train.py", line 103, in main | ||
8 | + run_model(args, q) | ||
9 | + File "E:\code\detection\trainer\train.py", line 196, in run_model | ||
10 | + **kwargs) | ||
11 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 219, in __init__ | ||
12 | + batch_sampler = BatchSampler(sampler, batch_size, drop_last) | ||
13 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\sampler.py", line 190, in __init__ | ||
14 | + "but got batch_size={}".format(batch_size)) | ||
15 | +ValueError: batch_size should be a positive integer value, but got batch_size=256 | ||
16 | +[train_2020-04-03-17-04-30] failed |
1 | +2020-04-03-17-07-00 | ||
2 | +use seed 809 | ||
3 | +use dataset : E:/code/detection/data/Fifth_data/All | ||
4 | +{'task': 'Error/train_2020-04-03-17-07-00_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': '3000', 'start-epoch': 0, 'batch-size': '256', 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': '256', 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': '0.001', 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-07-00'} | ||
5 | +Fatal error in main loop | ||
6 | +Traceback (most recent call last): | ||
7 | + File "E:\code\detection\trainer\train.py", line 103, in main | ||
8 | + run_model(args, q) | ||
9 | + File "E:\code\detection\trainer\train.py", line 196, in run_model | ||
10 | + **kwargs) | ||
11 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 219, in __init__ | ||
12 | + batch_sampler = BatchSampler(sampler, batch_size, drop_last) | ||
13 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\sampler.py", line 190, in __init__ | ||
14 | + "but got batch_size={}".format(batch_size)) | ||
15 | +ValueError: batch_size should be a positive integer value, but got batch_size=256 | ||
16 | +[train_2020-04-03-17-07-00] failed |
1 | +2020-04-03-17-08-43 | ||
2 | +use seed 420 | ||
3 | +use dataset : E:/code/detection/data/Fifth_data/All | ||
4 | +{'task': 'Error/train_2020-04-03-17-08-43_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-08-43'} | ||
5 | +Number of model parameters: 154706 | ||
6 | +Fatal error in main loop | ||
7 | +Traceback (most recent call last): | ||
8 | + File "E:\code\detection\trainer\train.py", line 102, in main | ||
9 | + run_model(args, q) | ||
10 | + File "E:\code\detection\trainer\train.py", line 276, in run_model | ||
11 | + train(train_loader, model, criterion, optimizer, scheduler, epoch, args, q) | ||
12 | + File "E:\code\detection\trainer\train.py", line 327, in train | ||
13 | + loss = criterion(output, target) | ||
14 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\module.py", line 532, in __call__ | ||
15 | + result = self.forward(*input, **kwargs) | ||
16 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\loss.py", line 916, in forward | ||
17 | + ignore_index=self.ignore_index, reduction=self.reduction) | ||
18 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\functional.py", line 2021, in cross_entropy | ||
19 | + return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction) | ||
20 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\functional.py", line 1838, in nll_loss | ||
21 | + ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index) | ||
22 | +IndexError: Target 5 is out of bounds. | ||
23 | +[train_2020-04-03-17-08-43] failed | ||
24 | +2020-04-03-17-09-59 | ||
25 | +use seed 420 | ||
26 | +use dataset : E:/code/detection/data/Fifth_data/Error | ||
27 | +{'task': 'Error/train_2020-04-03-17-09-59_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-09-59'} | ||
28 | +Number of model parameters: 154706 | ||
29 | +Epoch: [0][0/26] Time 32.853 (32.853) Loss 0.6907 (0.6907) Prec@1 56.641 (56.641) Precision 0.000 (0.000) |
1 | +2020-04-03-17-09-59 | ||
2 | +use seed 420 | ||
3 | +use dataset : E:/code/detection/data/Fifth_data/Error | ||
4 | +{'task': 'Error/train_2020-04-03-17-09-59_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-09-59'} | ||
5 | +Number of model parameters: 154706 | ||
6 | +Epoch: [0][0/26] Time 32.853 (32.853) Loss 0.6907 (0.6907) Prec@1 56.641 (56.641) Precision 0.000 (0.000) |
1 | +2020-04-03-17-22-28 | ||
2 | +use seed 845 | ||
3 | +use dataset : E:/code/detection/data/Fifth_data/Error | ||
4 | +{'task': 'Error/train_2020-04-03-17-22-28_model=MobilenetV3-ep=1000-block=5-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 1000, 'start-epoch': 0, 'batch-size': 128, 'worker': 16, 'resume': '', 'augment': True, 'size': 128, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 128, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.01, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 5, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-22-28'} | ||
5 | +Number of model parameters: 400114 |
1 | +2020-04-08-19-37-53 | ||
2 | +use seed 41 | ||
3 | +use dataset : E:/code/detection/data/Fifth_data/All | ||
4 | +{'task': 'Error/train_2020-04-08-19-37-53_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-08-19-37-53'} |
logs/ErrorType/2715_MobilenetV3_block_6.log
0 → 100644
This diff could not be displayed because it is too large.
1 | +2020-04-01-19-51-23 | ||
2 | +use seed 355 | ||
3 | +use dataset : ../data/Fifth_data/ErrorType | ||
4 | +{'task': 'ErrorType/train_2020-04-01-19-51-23_model=MobilenetV3-ep=3000-block=6-class=7', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/ErrorType', 'val': '../data/Fifth_data/ErrorType', 'test': '../data/Fifth_data/ErrorType'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 4048, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 4048, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-19-51-23'} | ||
5 | +Number of model parameters: 461559 |
1 | +2020-04-01-22-42-16 | ||
2 | +use seed 805 | ||
3 | +use dataset : ../data/Fifth_data/ErrorType | ||
4 | +{'task': 'Type/train_2020-04-01-22-42-16_model=MobilenetV3-ep=3000-block=6-class=7', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/ErrorType', 'val': '../data/Fifth_data/ErrorType', 'test': '../data/Fifth_data/ErrorType'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 4048, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 4048, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-22-42-16'} | ||
5 | +Number of model parameters: 461559 |
logs/runs/All/eval.log
0 → 100644
1 | +Number of model parameters: 161111 | ||
2 | +=> loading checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1304) | ||
4 | +Test: [0/14] Time 4.107 (4.107) Loss 66.0858 (66.0858) Prec@1 12.891 (12.891) | ||
5 | +Test: [1/14] Time 0.289 (2.198) Loss 64.3240 (65.2049) Prec@1 15.625 (14.258) | ||
6 | +Test: [2/14] Time 0.362 (1.586) Loss 65.8078 (65.4059) Prec@1 13.672 (14.062) | ||
7 | +Test: [3/14] Time 0.347 (1.276) Loss 68.0060 (66.0559) Prec@1 10.156 (13.086) | ||
8 | +Test: [4/14] Time 0.259 (1.073) Loss 68.7825 (66.6012) Prec@1 10.156 (12.500) | ||
9 | +Test: [5/14] Time 0.285 (0.941) Loss 67.9427 (66.8248) Prec@1 12.109 (12.435) | ||
10 | +Test: [6/14] Time 0.346 (0.856) Loss 66.3187 (66.7525) Prec@1 11.328 (12.277) | ||
11 | +Test: [7/14] Time 0.246 (0.780) Loss 65.7671 (66.6293) Prec@1 14.062 (12.500) | ||
12 | +Test: [8/14] Time 0.316 (0.728) Loss 65.9718 (66.5563) Prec@1 12.109 (12.457) | ||
13 | +Test: [9/14] Time 0.236 (0.679) Loss 64.3964 (66.3403) Prec@1 14.844 (12.695) | ||
14 | +Test: [10/14] Time 0.291 (0.644) Loss 65.9720 (66.3068) Prec@1 11.328 (12.571) | ||
15 | +Test: [11/14] Time 0.339 (0.619) Loss 66.6106 (66.3321) Prec@1 12.109 (12.533) | ||
16 | +Test: [12/14] Time 0.243 (0.590) Loss 64.1202 (66.1620) Prec@1 14.062 (12.650) | ||
17 | +Test: [13/14] Time 0.551 (0.587) Loss 63.1240 (66.0631) Prec@1 13.393 (12.674) | ||
18 | + * Prec@1 12.674 | ||
19 | +Best accuracy: 90.11627924719522 | ||
20 | +[eval] done | ||
21 | +Number of model parameters: 161111 | ||
22 | +=> loading checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
23 | +=> loaded checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1304) | ||
24 | +Test: [0/14] Time 3.525 (3.525) Loss 0.2238 (0.2238) Prec@1 91.797 (91.797) | ||
25 | +Test: [1/14] Time 0.027 (1.776) Loss 0.1470 (0.1854) Prec@1 93.750 (92.773) | ||
26 | +Test: [2/14] Time 0.025 (1.192) Loss 0.2031 (0.1913) Prec@1 93.359 (92.969) | ||
27 | +Test: [3/14] Time 0.021 (0.900) Loss 0.1564 (0.1826) Prec@1 94.922 (93.457) | ||
28 | +Test: [4/14] Time 0.025 (0.725) Loss 0.1664 (0.1793) Prec@1 93.359 (93.438) | ||
29 | +Test: [5/14] Time 0.082 (0.617) Loss 0.3156 (0.2020) Prec@1 89.844 (92.839) | ||
30 | +Test: [6/14] Time 0.028 (0.533) Loss 0.2379 (0.2072) Prec@1 92.188 (92.746) | ||
31 | +Test: [7/14] Time 0.021 (0.469) Loss 0.1547 (0.2006) Prec@1 95.312 (93.066) | ||
32 | +Test: [8/14] Time 0.032 (0.421) Loss 0.2915 (0.2107) Prec@1 90.625 (92.795) | ||
33 | +Test: [9/14] Time 0.028 (0.381) Loss 0.2297 (0.2126) Prec@1 92.188 (92.734) | ||
34 | +Test: [10/14] Time 0.023 (0.349) Loss 0.1834 (0.2099) Prec@1 94.922 (92.933) | ||
35 | +Test: [11/14] Time 0.028 (0.322) Loss 0.1838 (0.2078) Prec@1 94.141 (93.034) | ||
36 | +Test: [12/14] Time 0.027 (0.299) Loss 0.1991 (0.2071) Prec@1 92.188 (92.969) | ||
37 | +Test: [13/14] Time 0.043 (0.281) Loss 0.0723 (0.2027) Prec@1 97.321 (93.110) | ||
38 | + * Prec@1 93.110 | ||
39 | +Best accuracy: 93.11046504530796 | ||
40 | +[eval] done | ||
41 | +Number of model parameters: 161111 | ||
42 | +=> loading checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
43 | +=> loaded checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
44 | +Test: [0/14] Time 4.212 (4.212) Loss 52.0523 (52.0523) Prec@1 16.406 (16.406) | ||
45 | +Test: [1/14] Time 0.064 (2.138) Loss 54.9665 (53.5094) Prec@1 12.891 (14.648) | ||
46 | +Test: [2/14] Time 0.065 (1.447) Loss 52.2416 (53.0868) Prec@1 16.016 (15.104) | ||
47 | +Test: [3/14] Time 0.062 (1.101) Loss 52.7707 (53.0078) Prec@1 14.453 (14.941) | ||
48 | +Test: [4/14] Time 0.059 (0.892) Loss 53.9052 (53.1873) Prec@1 14.062 (14.766) | ||
49 | +Test: [5/14] Time 0.075 (0.756) Loss 53.5808 (53.2528) Prec@1 10.938 (14.128) | ||
50 | +Test: [6/14] Time 0.079 (0.659) Loss 54.1428 (53.3800) Prec@1 12.109 (13.839) | ||
51 | +Test: [7/14] Time 0.064 (0.585) Loss 56.6207 (53.7851) Prec@1 10.156 (13.379) | ||
52 | +Test: [8/14] Time 0.061 (0.527) Loss 54.8389 (53.9022) Prec@1 12.891 (13.325) | ||
53 | +Test: [9/14] Time 0.072 (0.481) Loss 58.1075 (54.3227) Prec@1 9.766 (12.969) | ||
54 | +Test: [10/14] Time 0.070 (0.444) Loss 57.8013 (54.6389) Prec@1 7.812 (12.500) | ||
55 | +Test: [11/14] Time 0.076 (0.413) Loss 55.8743 (54.7419) Prec@1 10.938 (12.370) | ||
56 | +Test: [12/14] Time 0.058 (0.386) Loss 51.2172 (54.4708) Prec@1 16.406 (12.680) | ||
57 | +Test: [13/14] Time 0.060 (0.363) Loss 53.5995 (54.4424) Prec@1 12.500 (12.674) | ||
58 | + * Prec@1 12.674 | ||
59 | +Creating CAM | ||
60 | +Fatal error in main loop | ||
61 | +Traceback (most recent call last): | ||
62 | + File "test.py", line 173, in main | ||
63 | + run_model(args, q) | ||
64 | + File "test.py", line 255, in run_model | ||
65 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
66 | + File "test.py", line 313, in validate | ||
67 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class']) | ||
68 | +KeyError: 'cam-class' | ||
69 | +[eval] failed | ||
70 | +Number of model parameters: 161111 | ||
71 | +=> loading checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
72 | +=> loaded checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
73 | +Test: [0/14] Time 4.173 (4.173) Loss 0.2717 (0.2717) Prec@1 89.062 (89.062) | ||
74 | +Test: [1/14] Time 0.023 (2.098) Loss 0.3759 (0.3238) Prec@1 89.062 (89.062) | ||
75 | +Test: [2/14] Time 0.024 (1.407) Loss 0.2627 (0.3035) Prec@1 91.016 (89.714) | ||
76 | +Test: [3/14] Time 0.235 (1.114) Loss 0.2139 (0.2811) Prec@1 92.969 (90.527) | ||
77 | +Test: [4/14] Time 0.014 (0.894) Loss 0.2226 (0.2694) Prec@1 92.969 (91.016) | ||
78 | +Test: [5/14] Time 0.013 (0.747) Loss 0.2957 (0.2738) Prec@1 90.625 (90.951) | ||
79 | +Test: [6/14] Time 0.014 (0.642) Loss 0.3337 (0.2823) Prec@1 89.453 (90.737) | ||
80 | +Test: [7/14] Time 0.014 (0.564) Loss 0.3640 (0.2925) Prec@1 89.062 (90.527) | ||
81 | +Test: [8/14] Time 0.014 (0.503) Loss 0.2417 (0.2869) Prec@1 91.016 (90.582) | ||
82 | +Test: [9/14] Time 0.013 (0.454) Loss 0.2122 (0.2794) Prec@1 91.406 (90.664) | ||
83 | +Test: [10/14] Time 0.011 (0.414) Loss 0.1912 (0.2714) Prec@1 94.531 (91.016) | ||
84 | +Test: [11/14] Time 0.013 (0.380) Loss 0.3103 (0.2746) Prec@1 91.016 (91.016) | ||
85 | +Test: [12/14] Time 0.012 (0.352) Loss 0.2584 (0.2734) Prec@1 92.188 (91.106) | ||
86 | +Test: [13/14] Time 0.039 (0.330) Loss 0.3947 (0.2773) Prec@1 89.286 (91.047) | ||
87 | + * Prec@1 91.047 | ||
88 | +Creating CAM | ||
89 | +Fatal error in main loop | ||
90 | +Traceback (most recent call last): | ||
91 | + File "test.py", line 173, in main | ||
92 | + run_model(args, q) | ||
93 | + File "test.py", line 255, in run_model | ||
94 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
95 | + File "test.py", line 313, in validate | ||
96 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class']) | ||
97 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 164, in make_grad_cam | ||
98 | + image_paths.remove('eval_results/Error/error_case/cam') | ||
99 | +ValueError: list.remove(x): x not in list | ||
100 | +[eval] failed | ||
101 | +Number of model parameters: 161111 | ||
102 | +=> loading checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
103 | +=> loaded checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
104 | +Test: [0/14] Time 4.361 (4.361) Loss 0.3446 (0.3446) Prec@1 89.844 (89.844) | ||
105 | +Test: [1/14] Time 0.020 (2.190) Loss 0.2700 (0.3073) Prec@1 91.016 (90.430) | ||
106 | +Test: [2/14] Time 0.218 (1.533) Loss 0.3154 (0.3100) Prec@1 91.406 (90.755) | ||
107 | +Test: [3/14] Time 0.018 (1.154) Loss 0.2044 (0.2836) Prec@1 90.625 (90.723) | ||
108 | +Test: [4/14] Time 0.019 (0.927) Loss 0.3468 (0.2963) Prec@1 88.672 (90.312) | ||
109 | +Test: [5/14] Time 0.019 (0.776) Loss 0.2885 (0.2950) Prec@1 89.453 (90.169) | ||
110 | +Test: [6/14] Time 0.017 (0.667) Loss 0.2948 (0.2949) Prec@1 91.016 (90.290) | ||
111 | +Test: [7/14] Time 0.016 (0.586) Loss 0.2294 (0.2867) Prec@1 92.969 (90.625) | ||
112 | +Test: [8/14] Time 0.018 (0.523) Loss 0.3430 (0.2930) Prec@1 89.844 (90.538) | ||
113 | +Test: [9/14] Time 0.016 (0.472) Loss 0.2377 (0.2875) Prec@1 92.188 (90.703) | ||
114 | +Test: [10/14] Time 0.013 (0.430) Loss 0.3161 (0.2901) Prec@1 92.578 (90.874) | ||
115 | +Test: [11/14] Time 0.014 (0.396) Loss 0.2830 (0.2895) Prec@1 89.453 (90.755) | ||
116 | +Test: [12/14] Time 0.010 (0.366) Loss 0.1243 (0.2768) Prec@1 96.094 (91.166) | ||
117 | +Test: [13/14] Time 0.051 (0.344) Loss 0.2939 (0.2773) Prec@1 87.500 (91.047) | ||
118 | + * Prec@1 91.047 | ||
119 | +Creating CAM | ||
120 | +Fatal error in main loop | ||
121 | +Traceback (most recent call last): | ||
122 | + File "test.py", line 173, in main | ||
123 | + run_model(args, q) | ||
124 | + File "test.py", line 255, in run_model | ||
125 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
126 | + File "test.py", line 313, in validate | ||
127 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'], args=args) | ||
128 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 220, in make_grad_cam | ||
129 | + image_paths[j].split('/')[-1], arch, target_layer, classes[ids[j, i]] | ||
130 | +IndexError: list index out of range | ||
131 | +[eval] failed | ||
132 | +Number of model parameters: 161111 | ||
133 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
134 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 1647) | ||
135 | +Test: [0/2] Time 4.475 (4.475) Loss 0.4215 (0.4215) Prec@1 92.578 (92.578) | ||
136 | +Test: [1/2] Time 0.047 (2.261) Loss 0.9263 (0.5382) Prec@1 88.312 (91.592) | ||
137 | + * Prec@1 91.592 | ||
138 | +Creating CAM | ||
139 | +Fatal error in main loop | ||
140 | +Traceback (most recent call last): | ||
141 | + File "test.py", line 173, in main | ||
142 | + run_model(args, q) | ||
143 | + File "test.py", line 255, in run_model | ||
144 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
145 | + File "test.py", line 313, in validate | ||
146 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'], args=args) | ||
147 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 221, in make_grad_cam | ||
148 | + image_paths[j].split('/')[-1], arch, target_layer, classes[ids[j, i]] | ||
149 | +IndexError: list index out of range | ||
150 | +[eval] failed | ||
151 | +Number of model parameters: 161111 | ||
152 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
153 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633) | ||
154 | +Test: [0/2] Time 5.110 (5.110) Loss 0.3949 (0.3949) Prec@1 92.969 (92.969) | ||
155 | +Test: [1/2] Time 0.040 (2.575) Loss 0.3570 (0.3862) Prec@1 96.104 (93.694) | ||
156 | + * Prec@1 93.694 | ||
157 | +Best accuracy: 93.6936941519156 | ||
158 | +[eval] done | ||
159 | +Number of model parameters: 161111 | ||
160 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
161 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633) | ||
162 | +Fatal error in main loop | ||
163 | +Traceback (most recent call last): | ||
164 | + File "test.py", line 173, in main | ||
165 | + run_model(args, q) | ||
166 | + File "test.py", line 255, in run_model | ||
167 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
168 | + File "test.py", line 288, in validate | ||
169 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
170 | + File "test.py", line 359, in save_error_case | ||
171 | + cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img) | ||
172 | +IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number | ||
173 | +[eval] failed | ||
174 | +Number of model parameters: 161111 | ||
175 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
176 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633) | ||
177 | +Fatal error in main loop | ||
178 | +Traceback (most recent call last): | ||
179 | + File "test.py", line 173, in main | ||
180 | + run_model(args, q) | ||
181 | + File "test.py", line 255, in run_model | ||
182 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
183 | + File "test.py", line 288, in validate | ||
184 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
185 | + File "test.py", line 361, in save_error_case | ||
186 | + cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img) | ||
187 | +IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number | ||
188 | +[eval] failed | ||
189 | +Number of model parameters: 161111 | ||
190 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
191 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633) | ||
192 | +Fatal error in main loop | ||
193 | +Traceback (most recent call last): | ||
194 | + File "test.py", line 173, in main | ||
195 | + run_model(args, q) | ||
196 | + File "test.py", line 255, in run_model | ||
197 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
198 | + File "test.py", line 288, in validate | ||
199 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
200 | + File "test.py", line 364, in save_error_case | ||
201 | + cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img) | ||
202 | +IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number | ||
203 | +[eval] failed | ||
204 | +Number of model parameters: 161111 | ||
205 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
206 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633) | ||
207 | +Fatal error in main loop | ||
208 | +Traceback (most recent call last): | ||
209 | + File "test.py", line 173, in main | ||
210 | + run_model(args, q) | ||
211 | + File "test.py", line 255, in run_model | ||
212 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
213 | + File "test.py", line 288, in validate | ||
214 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
215 | + File "test.py", line 363, in save_error_case | ||
216 | + cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img) | ||
217 | +IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number | ||
218 | +[eval] failed | ||
219 | +Number of model parameters: 161111 | ||
220 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
221 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633) | ||
222 | +Fatal error in main loop | ||
223 | +Traceback (most recent call last): | ||
224 | + File "test.py", line 173, in main | ||
225 | + run_model(args, q) | ||
226 | + File "test.py", line 255, in run_model | ||
227 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
228 | + File "test.py", line 288, in validate | ||
229 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
230 | + File "test.py", line 336, in save_error_case | ||
231 | + prob, pred = output.topk(maxk, 2, True, True) | ||
232 | +IndexError: Dimension out of range (expected to be in range of [-2, 1], but got 2) | ||
233 | +[eval] failed | ||
234 | +Number of model parameters: 161111 | ||
235 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
236 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633) | ||
237 | +Fatal error in main loop | ||
238 | +Traceback (most recent call last): | ||
239 | + File "test.py", line 173, in main | ||
240 | + run_model(args, q) | ||
241 | + File "test.py", line 255, in run_model | ||
242 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
243 | + File "test.py", line 288, in validate | ||
244 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
245 | + File "test.py", line 341, in save_error_case | ||
246 | + pred = pred.view(batch_size) | ||
247 | +RuntimeError: shape '[256]' is invalid for input of size 512 | ||
248 | +[eval] failed | ||
249 | +Number of model parameters: 161111 | ||
250 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
251 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633) | ||
252 | +Fatal error in main loop | ||
253 | +Traceback (most recent call last): | ||
254 | + File "test.py", line 173, in main | ||
255 | + run_model(args, q) | ||
256 | + File "test.py", line 255, in run_model | ||
257 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
258 | + File "test.py", line 288, in validate | ||
259 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
260 | + File "test.py", line 341, in save_error_case | ||
261 | + pred = pred.view(batch_size, -1) | ||
262 | +RuntimeError: invalid argument 2: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Call .contiguous() before .view(). at /pytorch/aten/src/THC/generic/THCTensor.cpp:209 | ||
263 | +[eval] failed | ||
264 | +Number of model parameters: 161111 | ||
265 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
266 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633) | ||
267 | +Fatal error in main loop | ||
268 | +Traceback (most recent call last): | ||
269 | + File "test.py", line 173, in main | ||
270 | + run_model(args, q) | ||
271 | + File "test.py", line 255, in run_model | ||
272 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
273 | + File "test.py", line 288, in validate | ||
274 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
275 | + File "test.py", line 343, in save_error_case | ||
276 | + pred = pred.view(batch_size, -1) | ||
277 | +RuntimeError: invalid argument 2: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Call .contiguous() before .view(). at /pytorch/aten/src/THC/generic/THCTensor.cpp:209 | ||
278 | +[eval] failed | ||
279 | +Number of model parameters: 161111 | ||
280 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
281 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
282 | +Fatal error in main loop | ||
283 | +Traceback (most recent call last): | ||
284 | + File "test.py", line 173, in main | ||
285 | + run_model(args, q) | ||
286 | + File "test.py", line 255, in run_model | ||
287 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
288 | + File "test.py", line 288, in validate | ||
289 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
290 | + File "test.py", line 344, in save_error_case | ||
291 | + pred = pred.view(batch_size, -1) | ||
292 | +RuntimeError: invalid argument 2: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Call .contiguous() before .view(). at /pytorch/aten/src/THC/generic/THCTensor.cpp:209 | ||
293 | +[eval] failed | ||
294 | +Number of model parameters: 161111 | ||
295 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
296 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
297 | +Fatal error in main loop | ||
298 | +Traceback (most recent call last): | ||
299 | + File "test.py", line 173, in main | ||
300 | + run_model(args, q) | ||
301 | + File "test.py", line 255, in run_model | ||
302 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
303 | + File "test.py", line 288, in validate | ||
304 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
305 | + File "test.py", line 344, in save_error_case | ||
306 | + pred = pred.view(batch_size, 2) | ||
307 | +RuntimeError: invalid argument 2: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Call .contiguous() before .view(). at /pytorch/aten/src/THC/generic/THCTensor.cpp:209 | ||
308 | +[eval] failed | ||
309 | +Number of model parameters: 161111 | ||
310 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
311 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
312 | +Fatal error in main loop | ||
313 | +Traceback (most recent call last): | ||
314 | + File "test.py", line 173, in main | ||
315 | + run_model(args, q) | ||
316 | + File "test.py", line 255, in run_model | ||
317 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
318 | + File "test.py", line 288, in validate | ||
319 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
320 | + File "test.py", line 344, in save_error_case | ||
321 | + correct = correct.view(batch_size) | ||
322 | +RuntimeError: shape '[256]' is invalid for input of size 512 | ||
323 | +[eval] failed | ||
324 | +Number of model parameters: 161111 | ||
325 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
326 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
327 | +Fatal error in main loop | ||
328 | +Traceback (most recent call last): | ||
329 | + File "test.py", line 173, in main | ||
330 | + run_model(args, q) | ||
331 | + File "test.py", line 255, in run_model | ||
332 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
333 | + File "test.py", line 288, in validate | ||
334 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
335 | + File "test.py", line 357, in save_error_case | ||
336 | + cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img) | ||
337 | +IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number | ||
338 | +[eval] failed | ||
339 | +Number of model parameters: 161111 | ||
340 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
341 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
342 | +Fatal error in main loop | ||
343 | +Traceback (most recent call last): | ||
344 | + File "test.py", line 173, in main | ||
345 | + run_model(args, q) | ||
346 | + File "test.py", line 255, in run_model | ||
347 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
348 | + File "test.py", line 288, in validate | ||
349 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False) | ||
350 | + File "test.py", line 358, in save_error_case | ||
351 | + cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img) | ||
352 | +IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number | ||
353 | +[eval] failed | ||
354 | +Number of model parameters: 161111 | ||
355 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
356 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
357 | +Test: [0/2] Time 4.322 (4.322) Loss 0.5515 (0.5515) Prec@1 92.969 (92.969) | ||
358 | +Test: [1/2] Time 0.048 (2.185) Loss 0.2176 (0.4743) Prec@1 97.403 (93.994) | ||
359 | + * Prec@1 93.994 | ||
360 | +Best accuracy: 93.9939935586832 | ||
361 | +[eval] done | ||
362 | +Number of model parameters: 161111 | ||
363 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
364 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
365 | +Test: [0/2] Time 4.349 (4.349) Loss 0.5171 (0.5171) Prec@1 93.359 (93.359) | ||
366 | +Test: [1/2] Time 0.053 (2.201) Loss 0.3320 (0.4743) Prec@1 96.104 (93.994) | ||
367 | + * Prec@1 93.994 | ||
368 | +Best accuracy: 93.99399422310493 | ||
369 | +[eval] done | ||
370 | +Number of model parameters: 161111 | ||
371 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
372 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
373 | +Test: [0/2] Time 4.404 (4.404) Loss 0.5640 (0.5640) Prec@1 92.578 (92.578) | ||
374 | +Test: [1/2] Time 0.056 (2.230) Loss 0.1761 (0.4743) Prec@1 98.701 (93.994) | ||
375 | + * Prec@1 93.994 | ||
376 | +Best accuracy: 93.99399312337239 | ||
377 | +[eval] done | ||
378 | +Number of model parameters: 161111 | ||
379 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
380 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
381 | +Test: [0/2] Time 3.512 (3.512) Loss 0.4511 (0.4511) Prec@1 93.750 (93.750) | ||
382 | +Test: [1/2] Time 0.045 (1.779) Loss 0.5512 (0.4743) Prec@1 94.805 (93.994) | ||
383 | + * Prec@1 93.994 | ||
384 | +Best accuracy: 93.99399312337239 | ||
385 | +[eval] done | ||
386 | +Number of model parameters: 161111 | ||
387 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
388 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
389 | +Test: [0/2] Time 3.608 (3.608) Loss 0.5495 (0.5495) Prec@1 93.750 (93.750) | ||
390 | +Test: [1/2] Time 0.045 (1.827) Loss 0.2242 (0.4743) Prec@1 94.805 (93.994) | ||
391 | + * Prec@1 93.994 | ||
392 | +Best accuracy: 93.99399312337239 | ||
393 | +[eval] done | ||
394 | +Number of model parameters: 161111 | ||
395 | +=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
396 | +=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109) | ||
397 | +Test: [0/2] Time 3.403 (3.403) Loss 0.4165 (0.4165) Prec@1 94.531 (94.531) | ||
398 | +Test: [1/2] Time 0.045 (1.724) Loss 0.6662 (0.4743) Prec@1 92.208 (93.994) | ||
399 | + * Prec@1 93.994 | ||
400 | +Best accuracy: 93.99399445221589 | ||
401 | +[eval] done | ||
402 | +Number of model parameters: 161111 | ||
403 | +Number of model parameters: 161111 | ||
404 | +=> loading checkpoint 'output/All/40418_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
405 | +=> loaded checkpoint 'output/All/40418_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 399) | ||
406 | +Test: [0/2] Time 3.386 (3.386) Loss 0.3162 (0.3162) Prec@1 94.141 (94.141) | ||
407 | +Test: [1/2] Time 0.045 (1.716) Loss 0.3989 (0.3353) Prec@1 92.208 (93.694) | ||
408 | + * Prec@1 93.694 | ||
409 | +Best accuracy: 93.6936941519156 | ||
410 | +[eval] done | ||
411 | +Number of model parameters: 461559 | ||
412 | +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
413 | +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382) | ||
414 | +Test: [0/2] Time 5.649 (5.649) Loss 0.2832 (0.2832) Prec@1 95.703 (95.703) | ||
415 | +Test: [1/2] Time 0.201 (2.925) Loss 0.5106 (0.3358) Prec@1 94.805 (95.495) | ||
416 | + * Prec@1 95.495 | ||
417 | +Best accuracy: 95.49549572460644 | ||
418 | +[eval] done | ||
419 | +Number of model parameters: 461559 | ||
420 | +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
421 | +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382) | ||
422 | +Test: [0/2] Time 6.114 (6.114) Loss 0.3426 (0.3426) Prec@1 95.703 (95.703) | ||
423 | +Test: [1/2] Time 0.141 (3.127) Loss 0.3133 (0.3358) Prec@1 94.805 (95.495) | ||
424 | + * Prec@1 95.495 | ||
425 | +Best accuracy: 95.49549572460644 | ||
426 | +[eval] done |
1 | +Number of model parameters: 461559 | ||
2 | +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382) | ||
4 | + * Prec@1 95.495 | ||
5 | + * Prec@1 95.495 | ||
6 | +Best accuracy: 95.49549572460644 | ||
7 | +[validate_2020-03-26-17-26-14] done | ||
8 | +[validate_2020-03-26-17-26-14] done | ||
9 | +/home/yh9468/detection/data/Fourth_data/demo Test dir submitted | ||
10 | +start test using path : /home/yh9468/detection/data/Fourth_data/demo | ||
11 | +Test start | ||
12 | +loading checkpoint... | ||
13 | +checkpoint already loaded! | ||
14 | +start test | ||
15 | +data path directory is /home/yh9468/detection/data/Fourth_data/demo | ||
16 | +finish test | ||
17 | +/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp test file submitted | ||
18 | +start test using path : ('/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp', 'All Files(*)') | ||
19 | +Test start | ||
20 | +loading checkpoint... | ||
21 | +checkpoint already loaded! | ||
22 | +start test | ||
23 | +finish test |
1 | +Number of model parameters: 461559 | ||
2 | +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382) | ||
4 | + * Prec@1 95.495 | ||
5 | + * Prec@1 95.495 | ||
6 | +Best accuracy: 95.49549572460644 | ||
7 | +[validate_2020-03-26-17-48-44] done | ||
8 | +[validate_2020-03-26-17-48-44] done | ||
9 | +set error |
1 | +using default checkpoint | ||
2 | +Number of model parameters: 461559 | ||
3 | +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
4 | +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382) | ||
5 | +Fatal error in main loop | ||
6 | +Traceback (most recent call last): | ||
7 | + File "E:\code\detection\trainer\test.py", line 270, in main | ||
8 | + run_model(args, q) | ||
9 | + File "E:\code\detection\trainer\test.py", line 360, in run_model | ||
10 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
11 | + File "E:\code\detection\trainer\test.py", line 394, in validate | ||
12 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=True) | ||
13 | + File "E:\code\detection\trainer\test.py", line 455, in save_error_case | ||
14 | + os.mkdir(f"eval_results/{args['task']}") | ||
15 | +FileNotFoundError: [WinError 3] 지정된 경로를 찾을 수 없습니다: 'eval_results/All' | ||
16 | +[validate_2020-03-31-18-34-56] failed |
1 | +Number of model parameters: 461559 | ||
2 | +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382) | ||
4 | +Test: [0/2] Time 118.781 (118.781) Loss 0.3068 (0.3068) Prec@1 95.703 (95.703) | ||
5 | +Test: [1/2] Time 3.661 (61.221) Loss 0.4321 (0.3358) Prec@1 94.805 (95.495) | ||
6 | + * Prec@1 95.495 | ||
7 | + * Prec@1 95.495 | ||
8 | +Best accuracy: 95.49549572460644 | ||
9 | +[validate_2020-03-31-19-08-47] done | ||
10 | +[validate_2020-03-31-19-08-47] done | ||
11 | +train start | ||
12 | +2020-03-31-19-11-26 | ||
13 | +use seed 963 | ||
14 | +use dataset : ../data/Fourth_data/All | ||
15 | +{'task': 'All/train_2020-03-31-19-11-26_model=MobilenetV3-ep=3000-block=6', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': '../data/Fourth_data/All', 'val': '../data/Fourth_data/All', 'test': '../data/Fourth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'weight': [2.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0], 'resume': '', 'augment': True, 'size': 224, 'confidence': False}, 'predict': {'batch-size': 256, 'worker': 64, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-31-19-11-26'} |
1 | +using default checkpoint | ||
2 | +Number of model parameters: 462840 | ||
3 | +=> loading checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
4 | +=> loaded checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1617) | ||
5 | +Test: [0/3] Time 31.288 (31.288) Loss 0.2660 (0.2660) Prec@1 95.703 (95.703) | ||
6 | +Test: [1/3] Time 7.587 (19.437) Loss 0.3209 (0.2934) Prec@1 95.312 (95.508) | ||
7 | +Test: [2/3] Time 6.625 (15.167) Loss 0.1835 (0.2602) Prec@1 96.396 (95.777) | ||
8 | + * Prec@1 95.777 | ||
9 | + * Prec@1 95.777 | ||
10 | +Best accuracy: 95.77656669512757 | ||
11 | +[validate_2020-04-01-23-00-04] done | ||
12 | +[validate_2020-04-01-23-00-04] done | ||
13 | +set error | ||
14 | +Test를 수행하기 위해 데이터를 입력해 주세요. | ||
15 | +Test를 수행하기 위해 데이터를 입력해 주세요. |
1 | +using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
2 | +Number of model parameters: 462840 | ||
3 | +=> loading checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' | ||
4 | +=> loaded checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' (epoch 3000) | ||
5 | +Test: [0/3] Time 32.591 (32.591) Loss 0.1575 (0.1575) Prec@1 94.531 (94.531) | ||
6 | +Test: [1/3] Time 8.179 (20.385) Loss 0.2475 (0.2025) Prec@1 93.750 (94.141) | ||
7 | +Test: [2/3] Time 7.374 (16.048) Loss 0.4568 (0.2794) Prec@1 94.595 (94.278) | ||
8 | + * Prec@1 94.278 | ||
9 | + * Prec@1 94.278 | ||
10 | +Best accuracy: 96.04904700754774 | ||
11 | +[validate_2020-04-03-17-39-50] done | ||
12 | +[validate_2020-04-03-17-39-50] done | ||
13 | +E:/code/detection/data/Fifth_data/All/Empty/1-5.bmp test file submitted | ||
14 | +Test start | ||
15 | +start test using path : E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
16 | +using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
17 | +loading checkpoint... | ||
18 | +checkpoint already loaded! | ||
19 | +start test | ||
20 | +single_file_test() missing 1 required positional argument: 'q' | ||
21 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. |
logs/runs/Error/eval.log
0 → 100644
1 | +Number of model parameters: 154706 | ||
2 | +=> loading checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
3 | +=> loaded checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
4 | +Test: [0/1] Time 0.609 (0.609) Loss 0.0259 (0.0259) Prec@1 99.408 (99.408) | ||
5 | + * Prec@1 99.408 | ||
6 | +Best accuracy: 100.0 | ||
7 | +[eval] done | ||
8 | +Number of model parameters: 154706 | ||
9 | +=> loading checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
10 | +=> loaded checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
11 | +Fatal error in main loop | ||
12 | +Traceback (most recent call last): | ||
13 | + File "eval_binary_model.py", line 68, in main | ||
14 | + run_model(args) | ||
15 | + File "eval_binary_model.py", line 147, in run_model | ||
16 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args) | ||
17 | + File "eval_binary_model.py", line 173, in validate | ||
18 | + for i, (input, target), (path, _) in enumerate(val_loader): | ||
19 | +ValueError: not enough values to unpack (expected 3, got 2) | ||
20 | +[eval] failed | ||
21 | +Number of model parameters: 154706 | ||
22 | +=> loading checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
23 | +=> loaded checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
24 | +Test: [0/1] Time 0.615 (0.615) Loss 0.0259 (0.0259) Prec@1 99.408 (99.408) | ||
25 | + * Prec@1 99.408 | ||
26 | +Best accuracy: 100.0 | ||
27 | +[eval] done | ||
28 | +Number of model parameters: 154706 | ||
29 | +=> loading checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
30 | +=> loaded checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
31 | +Test: [0/1] Time 0.667 (0.667) Loss 0.0259 (0.0259) Prec@1 99.408 (99.408) | ||
32 | + * Prec@1 99.408 | ||
33 | +Best accuracy: 100.0 | ||
34 | +[eval] done | ||
35 | +Number of model parameters: 154706 | ||
36 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
37 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1085) | ||
38 | +Test: [0/1] Time 0.683 (0.683) Loss 17.0075 (17.0075) Prec@1 14.201 (14.201) | ||
39 | + * Prec@1 14.201 | ||
40 | +Best accuracy: 100.0 | ||
41 | +[eval] done | ||
42 | +Number of model parameters: 154706 | ||
43 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
44 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1085) | ||
45 | +Test: [0/1] Time 0.698 (0.698) Loss 6.7597 (6.7597) Prec@1 31.953 (31.953) | ||
46 | + * Prec@1 31.953 | ||
47 | +Creating CAM | ||
48 | +Fatal error in main loop | ||
49 | +Traceback (most recent call last): | ||
50 | + File "eval_binary_model.py", line 56, in main | ||
51 | + run_model(args) | ||
52 | + File "eval_binary_model.py", line 135, in run_model | ||
53 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args) | ||
54 | + File "eval_binary_model.py", line 196, in validate | ||
55 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None) | ||
56 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 164, in make_grad_cam | ||
57 | + images = torch.stack(images).to(device) | ||
58 | +RuntimeError: expected a non-empty list of Tensors | ||
59 | +[eval] failed | ||
60 | +Number of model parameters: 154706 | ||
61 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
62 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1718) | ||
63 | +Test: [0/1] Time 0.674 (0.674) Loss 6.5805 (6.5805) Prec@1 32.544 (32.544) | ||
64 | + * Prec@1 32.544 | ||
65 | +Creating CAM | ||
66 | +Fatal error in main loop | ||
67 | +Traceback (most recent call last): | ||
68 | + File "eval_binary_model.py", line 56, in main | ||
69 | + run_model(args) | ||
70 | + File "eval_binary_model.py", line 135, in run_model | ||
71 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args) | ||
72 | + File "eval_binary_model.py", line 196, in validate | ||
73 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None) | ||
74 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 166, in make_grad_cam | ||
75 | + images = torch.stack(images).to(device) | ||
76 | +RuntimeError: expected a non-empty list of Tensors | ||
77 | +[eval] failed | ||
78 | +Number of model parameters: 154706 | ||
79 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
80 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1760) | ||
81 | +Test: [0/1] Time 0.709 (0.709) Loss 5.7071 (5.7071) Prec@1 31.953 (31.953) | ||
82 | + * Prec@1 31.953 | ||
83 | +Creating CAM | ||
84 | +Fatal error in main loop | ||
85 | +Traceback (most recent call last): | ||
86 | + File "eval_binary_model.py", line 56, in main | ||
87 | + run_model(args) | ||
88 | + File "eval_binary_model.py", line 135, in run_model | ||
89 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args) | ||
90 | + File "eval_binary_model.py", line 196, in validate | ||
91 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None) | ||
92 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 165, in make_grad_cam | ||
93 | + images = torch.stack(images).to(device) | ||
94 | +RuntimeError: expected a non-empty list of Tensors | ||
95 | +[eval] failed | ||
96 | +Number of model parameters: 154706 | ||
97 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
98 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1843) | ||
99 | +Test: [0/1] Time 0.696 (0.696) Loss 7.9185 (7.9185) Prec@1 31.361 (31.361) | ||
100 | + * Prec@1 31.361 | ||
101 | +Creating CAM | ||
102 | +Fatal error in main loop | ||
103 | +Traceback (most recent call last): | ||
104 | + File "eval_binary_model.py", line 56, in main | ||
105 | + run_model(args) | ||
106 | + File "eval_binary_model.py", line 135, in run_model | ||
107 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args) | ||
108 | + File "eval_binary_model.py", line 196, in validate | ||
109 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None) | ||
110 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 163, in make_grad_cam | ||
111 | + images, raw_images, image_paths = load_images(image_paths, normalize_factor) | ||
112 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 56, in load_images | ||
113 | + image, raw_image = preprocess(image_path, normalize_factor) | ||
114 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 80, in preprocess | ||
115 | + raw_image = cv2.resize(raw_image, (64,) * 2) | ||
116 | +cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgproc/src/resize.cpp:3784: error: (-215:Assertion failed) !ssize.empty() in function 'resize' | ||
117 | + | ||
118 | +[eval] failed | ||
119 | +Number of model parameters: 154706 | ||
120 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
121 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1843) | ||
122 | +Test: [0/1] Time 0.685 (0.685) Loss 7.9185 (7.9185) Prec@1 31.361 (31.361) | ||
123 | + * Prec@1 31.361 | ||
124 | +Creating CAM | ||
125 | +Fatal error in main loop | ||
126 | +Traceback (most recent call last): | ||
127 | + File "eval_binary_model.py", line 56, in main | ||
128 | + run_model(args) | ||
129 | + File "eval_binary_model.py", line 135, in run_model | ||
130 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args) | ||
131 | + File "eval_binary_model.py", line 196, in validate | ||
132 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None) | ||
133 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 164, in make_grad_cam | ||
134 | + images, raw_images, image_paths = load_images(image_paths, normalize_factor) | ||
135 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 57, in load_images | ||
136 | + image, raw_image = preprocess(image_path, normalize_factor) | ||
137 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 81, in preprocess | ||
138 | + raw_image = cv2.resize(raw_image, (64,) * 2) | ||
139 | +cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgproc/src/resize.cpp:3784: error: (-215:Assertion failed) !ssize.empty() in function 'resize' | ||
140 | + | ||
141 | +[eval] failed | ||
142 | +Number of model parameters: 154706 | ||
143 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
144 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1843) | ||
145 | +Test: [0/1] Time 0.666 (0.666) Loss 7.9185 (7.9185) Prec@1 31.361 (31.361) | ||
146 | + * Prec@1 31.361 | ||
147 | +Creating CAM | ||
148 | +Best accuracy: 100.0 | ||
149 | +[eval] done | ||
150 | +Number of model parameters: 154706 | ||
151 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
152 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2117) | ||
153 | +Test: [0/1] Time 0.676 (0.676) Loss 0.0074 (0.0074) Prec@1 100.000 (100.000) | ||
154 | + * Prec@1 100.000 | ||
155 | +Creating CAM | ||
156 | +Fatal error in main loop | ||
157 | +Traceback (most recent call last): | ||
158 | + File "eval_binary_model.py", line 56, in main | ||
159 | + run_model(args) | ||
160 | + File "eval_binary_model.py", line 135, in run_model | ||
161 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args) | ||
162 | + File "eval_binary_model.py", line 196, in validate | ||
163 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None) | ||
164 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 166, in make_grad_cam | ||
165 | + images = torch.stack(images).to(device) | ||
166 | +RuntimeError: expected a non-empty list of Tensors | ||
167 | +[eval] failed | ||
168 | +Number of model parameters: 154706 | ||
169 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
170 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2195) | ||
171 | +Test: [0/6] Time 0.754 (0.754) Loss 0.0064 (0.0064) Prec@1 100.000 (100.000) | ||
172 | + * Prec@1 99.606 | ||
173 | +Creating CAM | ||
174 | +Best accuracy: 100.0 | ||
175 | +[eval] done | ||
176 | +Number of model parameters: 154706 | ||
177 | +Number of model parameters: 154706 | ||
178 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
179 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2995) | ||
180 | +Test: [0/1] Time 0.688 (0.688) Loss 0.0074 (0.0074) Prec@1 100.000 (100.000) | ||
181 | + * Prec@1 100.000 | ||
182 | +Creating CAM | ||
183 | +Best accuracy: 100.0 | ||
184 | +[eval] done | ||
185 | +Number of model parameters: 154706 | ||
186 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
187 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2995) | ||
188 | +Test: [0/1] Time 0.694 (0.694) Loss 0.0074 (0.0074) Prec@1 100.000 (100.000) | ||
189 | + * Prec@1 100.000 | ||
190 | +Creating CAM | ||
191 | +Fatal error in main loop | ||
192 | +Traceback (most recent call last): | ||
193 | + File "test.py", line 171, in main | ||
194 | + run_model(args, q) | ||
195 | + File "test.py", line 249, in run_model | ||
196 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
197 | + File "test.py", line 310, in validate | ||
198 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None, cam_class=args['predict']['cam-class']) | ||
199 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 168, in make_grad_cam | ||
200 | + images = torch.stack(images).to(device) | ||
201 | +RuntimeError: expected a non-empty list of Tensors | ||
202 | +[eval] failed | ||
203 | +Number of model parameters: 154706 | ||
204 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
205 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2995) | ||
206 | +Test: [0/7] Time 0.772 (0.772) Loss 0.0048 (0.0048) Prec@1 100.000 (100.000) | ||
207 | + * Prec@1 99.882 | ||
208 | +Creating CAM | ||
209 | +Best accuracy: 100.0 | ||
210 | +[eval] done | ||
211 | +Number of model parameters: 154706 | ||
212 | +=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
213 | +=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2995) | ||
214 | +Test: [0/7] Time 0.767 (0.767) Loss 0.0024 (0.0024) Prec@1 100.000 (100.000) | ||
215 | + * Prec@1 99.882 | ||
216 | +Creating CAM | ||
217 | +Best accuracy: 100.0 | ||
218 | +[eval] done | ||
219 | +Number of model parameters: 154706 | ||
220 | +=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
221 | +=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888) | ||
222 | +Test: [0/22] Time 1.470 (1.470) Loss 11.2787 (11.2787) Prec@1 51.172 (51.172) | ||
223 | +Test: [10/22] Time 0.152 (0.286) Loss 11.5936 (11.1885) Prec@1 51.953 (52.273) | ||
224 | +Test: [20/22] Time 0.139 (0.259) Loss 11.3118 (11.2861) Prec@1 49.609 (52.269) | ||
225 | + * Prec@1 52.311 | ||
226 | +Creating CAM | ||
227 | +Fatal error in main loop | ||
228 | +Traceback (most recent call last): | ||
229 | + File "test.py", line 171, in main | ||
230 | + run_model(args, q) | ||
231 | + File "test.py", line 249, in run_model | ||
232 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
233 | + File "test.py", line 310, in validate | ||
234 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None, cam_class=args['predict']['cam-class']) | ||
235 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 197, in make_grad_cam | ||
236 | + _ = gcam.forward(images) | ||
237 | + File "/home/yh9468/detection/trainer/visualize/grad_cam_utils.py", line 31, in forward | ||
238 | + self.logits = self.model(image) | ||
239 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__ | ||
240 | + result = self.forward(*input, **kwargs) | ||
241 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/parallel/data_parallel.py", line 150, in forward | ||
242 | + return self.module(*inputs[0], **kwargs[0]) | ||
243 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__ | ||
244 | + result = self.forward(*input, **kwargs) | ||
245 | + File "/home/yh9468/detection/trainer/model.py", line 170, in forward | ||
246 | + x = self.features(x) | ||
247 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__ | ||
248 | + result = self.forward(*input, **kwargs) | ||
249 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py", line 92, in forward | ||
250 | + input = module(input) | ||
251 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__ | ||
252 | + result = self.forward(*input, **kwargs) | ||
253 | + File "/home/yh9468/detection/trainer/model.py", line 31, in forward | ||
254 | + return x * F.relu6(x + 3., inplace=self.inplace) / 6. | ||
255 | +RuntimeError: CUDA out of memory. Tried to allocate 26.00 MiB (GPU 0; 10.92 GiB total capacity; 9.45 GiB already allocated; 6.56 MiB free; 52.02 MiB cached) | ||
256 | +[eval] failed | ||
257 | +Number of model parameters: 154706 | ||
258 | +=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
259 | +=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888) | ||
260 | +Test: [0/7] Time 1.322 (1.322) Loss 16.0815 (16.0815) Prec@1 38.672 (38.672) | ||
261 | + * Prec@1 31.915 | ||
262 | +Creating CAM | ||
263 | +Best accuracy: 100.0 | ||
264 | +[eval] done | ||
265 | +Number of model parameters: 154706 | ||
266 | +=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
267 | +=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888) | ||
268 | +Test: [0/7] Time 2.113 (2.113) Loss 17.8164 (17.8164) Prec@1 30.859 (30.859) | ||
269 | + * Prec@1 31.915 | ||
270 | +Creating CAM | ||
271 | +Best accuracy: 100.0 | ||
272 | +[eval] done | ||
273 | +Number of model parameters: 154706 | ||
274 | +=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
275 | +=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888) | ||
276 | +Test: [0/7] Time 1.314 (1.314) Loss 5.2641 (5.2641) Prec@1 60.156 (60.156) | ||
277 | + * Prec@1 59.338 | ||
278 | +Creating CAM | ||
279 | +Best accuracy: 100.0 | ||
280 | +[eval] done | ||
281 | +Number of model parameters: 154706 | ||
282 | +=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
283 | +=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888) | ||
284 | +Test: [0/7] Time 1.537 (1.537) Loss 2.6861 (2.6861) Prec@1 74.609 (74.609) | ||
285 | + * Prec@1 80.142 | ||
286 | +Creating CAM | ||
287 | +Best accuracy: 100.0 | ||
288 | +[eval] done | ||
289 | +Number of model parameters: 154706 | ||
290 | +=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
291 | +=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888) | ||
292 | +Test: [0/7] Time 2.039 (2.039) Loss 0.0138 (0.0138) Prec@1 99.219 (99.219) | ||
293 | + * Prec@1 99.764 | ||
294 | +Creating CAM | ||
295 | +Best accuracy: 100.0 | ||
296 | +[eval] done | ||
297 | +Number of model parameters: 154706 | ||
298 | +=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
299 | +=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888) | ||
300 | +Test: [0/7] Time 1.235 (1.235) Loss 0.0215 (0.0215) Prec@1 99.609 (99.609) | ||
301 | +Test: [1/7] Time 0.158 (0.697) Loss 0.0061 (0.0138) Prec@1 100.000 (99.805) | ||
302 | +Test: [2/7] Time 0.156 (0.517) Loss 0.0033 (0.0103) Prec@1 100.000 (99.870) | ||
303 | +Test: [3/7] Time 0.156 (0.427) Loss 0.0044 (0.0088) Prec@1 100.000 (99.902) | ||
304 | +Test: [4/7] Time 0.157 (0.373) Loss 0.0091 (0.0089) Prec@1 99.609 (99.844) | ||
305 | +Test: [5/7] Time 0.213 (0.346) Loss 0.0090 (0.0089) Prec@1 99.609 (99.805) | ||
306 | +Test: [6/7] Time 0.179 (0.322) Loss 0.0216 (0.0101) Prec@1 99.359 (99.764) | ||
307 | + * Prec@1 99.764 | ||
308 | +Creating CAM | ||
309 | +Best accuracy: 100.0 | ||
310 | +[eval] done | ||
311 | +Number of model parameters: 154706 | ||
312 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
313 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
314 | +Test: [0/7] Time 0.948 (0.948) Loss 0.0047 (0.0047) Prec@1 99.609 (99.609) | ||
315 | +Test: [1/7] Time 0.152 (0.550) Loss 0.0033 (0.0040) Prec@1 99.609 (99.609) | ||
316 | +Test: [2/7] Time 0.153 (0.418) Loss 0.0287 (0.0122) Prec@1 99.219 (99.479) | ||
317 | +Test: [3/7] Time 0.152 (0.351) Loss 0.0358 (0.0181) Prec@1 99.609 (99.512) | ||
318 | +Test: [4/7] Time 0.151 (0.311) Loss 0.0010 (0.0147) Prec@1 100.000 (99.609) | ||
319 | +Test: [5/7] Time 0.152 (0.285) Loss 0.0004 (0.0123) Prec@1 100.000 (99.674) | ||
320 | +Test: [6/7] Time 0.167 (0.268) Loss 0.0029 (0.0114) Prec@1 100.000 (99.704) | ||
321 | + * Prec@1 99.704 | ||
322 | +Creating CAM | ||
323 | +Best accuracy: 100.0 | ||
324 | +[eval] done | ||
325 | +Number of model parameters: 154706 | ||
326 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
327 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
328 | +Test: [0/7] Time 0.754 (0.754) Loss 0.0067 (0.0067) Prec@1 99.609 (99.609) | ||
329 | +Test: [1/7] Time 0.021 (0.387) Loss 0.0374 (0.0221) Prec@1 99.609 (99.609) | ||
330 | +Test: [2/7] Time 0.016 (0.264) Loss 0.0039 (0.0160) Prec@1 99.609 (99.609) | ||
331 | +Test: [3/7] Time 0.016 (0.202) Loss 0.0048 (0.0132) Prec@1 99.609 (99.609) | ||
332 | +Test: [4/7] Time 0.016 (0.165) Loss 0.0219 (0.0149) Prec@1 99.609 (99.609) | ||
333 | +Test: [5/7] Time 0.015 (0.140) Loss 0.0004 (0.0125) Prec@1 100.000 (99.674) | ||
334 | +Test: [6/7] Time 0.076 (0.131) Loss 0.0009 (0.0114) Prec@1 100.000 (99.704) | ||
335 | + * Prec@1 99.704 | ||
336 | +Creating CAM | ||
337 | +Best accuracy: 100.0 | ||
338 | +[eval] done | ||
339 | +Number of model parameters: 154706 | ||
340 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
341 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
342 | +Test: [0/7] Time 0.749 (0.749) Loss 0.0039 (0.0039) Prec@1 99.609 (99.609) | ||
343 | +Test: [1/7] Time 0.019 (0.384) Loss 0.0214 (0.0126) Prec@1 99.609 (99.609) | ||
344 | +Test: [2/7] Time 0.016 (0.261) Loss 0.0062 (0.0105) Prec@1 99.609 (99.609) | ||
345 | +Test: [3/7] Time 0.015 (0.199) Loss 0.0009 (0.0081) Prec@1 100.000 (99.707) | ||
346 | +Test: [4/7] Time 0.015 (0.162) Loss 0.0016 (0.0068) Prec@1 100.000 (99.766) | ||
347 | +Test: [5/7] Time 0.016 (0.138) Loss 0.0397 (0.0123) Prec@1 99.219 (99.674) | ||
348 | +Test: [6/7] Time 0.074 (0.129) Loss 0.0031 (0.0114) Prec@1 100.000 (99.704) | ||
349 | + * Prec@1 99.704 | ||
350 | +Creating CAM | ||
351 | +Best accuracy: 100.0 | ||
352 | +[eval] done | ||
353 | +Number of model parameters: 154706 | ||
354 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
355 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
356 | +Test: [0/7] Time 0.736 (0.736) Loss 0.0660 (0.0660) Prec@1 98.438 (98.438) | ||
357 | +Test: [1/7] Time 0.017 (0.376) Loss 0.0008 (0.0334) Prec@1 100.000 (99.219) | ||
358 | +Test: [2/7] Time 0.016 (0.256) Loss 0.0045 (0.0238) Prec@1 99.609 (99.349) | ||
359 | +Test: [3/7] Time 0.016 (0.196) Loss 0.0006 (0.0180) Prec@1 100.000 (99.512) | ||
360 | +Test: [4/7] Time 0.016 (0.160) Loss 0.0007 (0.0145) Prec@1 100.000 (99.609) | ||
361 | +Test: [5/7] Time 0.015 (0.136) Loss 0.0018 (0.0124) Prec@1 100.000 (99.674) | ||
362 | +Test: [6/7] Time 0.097 (0.130) Loss 0.0019 (0.0114) Prec@1 100.000 (99.704) | ||
363 | + * Prec@1 99.704 | ||
364 | +Creating CAM | ||
365 | +Fatal error in main loop | ||
366 | +Traceback (most recent call last): | ||
367 | + File "test.py", line 171, in main | ||
368 | + run_model(args, q) | ||
369 | + File "test.py", line 249, in run_model | ||
370 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
371 | + File "test.py", line 307, in validate | ||
372 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class']) | ||
373 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 174, in make_grad_cam | ||
374 | + cv2.imwrite(f"eval_results/Error/error_case/check/{image_path.split('/')[-1]}", raw_image) | ||
375 | +cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgcodecs/src/loadsave.cpp:661: error: (-2:Unspecified error) could not find a writer for the specified extension in function 'imwrite_' | ||
376 | + | ||
377 | +[eval] failed | ||
378 | +Number of model parameters: 154706 | ||
379 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
380 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
381 | +Test: [0/7] Time 0.717 (0.717) Loss 0.0004 (0.0004) Prec@1 100.000 (100.000) | ||
382 | +Test: [1/7] Time 0.026 (0.372) Loss 0.0005 (0.0004) Prec@1 100.000 (100.000) | ||
383 | +Test: [2/7] Time 0.016 (0.253) Loss 0.0010 (0.0006) Prec@1 100.000 (100.000) | ||
384 | +Test: [3/7] Time 0.023 (0.196) Loss 0.0079 (0.0025) Prec@1 99.219 (99.805) | ||
385 | +Test: [4/7] Time 0.016 (0.160) Loss 0.0232 (0.0066) Prec@1 99.609 (99.766) | ||
386 | +Test: [5/7] Time 0.016 (0.136) Loss 0.0366 (0.0116) Prec@1 99.609 (99.740) | ||
387 | +Test: [6/7] Time 0.080 (0.128) Loss 0.0098 (0.0114) Prec@1 99.359 (99.704) | ||
388 | + * Prec@1 99.704 | ||
389 | +Creating CAM | ||
390 | +Fatal error in main loop | ||
391 | +Traceback (most recent call last): | ||
392 | + File "test.py", line 171, in main | ||
393 | + run_model(args, q) | ||
394 | + File "test.py", line 249, in run_model | ||
395 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
396 | + File "test.py", line 307, in validate | ||
397 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class']) | ||
398 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 167, in make_grad_cam | ||
399 | + images, raw_images, image_paths = load_images(image_paths, normalize_factor) | ||
400 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 58, in load_images | ||
401 | + image, raw_image = preprocess(image_path, normalize_factor) | ||
402 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 82, in preprocess | ||
403 | + raw_image = cv2.resize(raw_image, (128,) * 2) | ||
404 | +cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgproc/src/resize.cpp:3784: error: (-215:Assertion failed) !ssize.empty() in function 'resize' | ||
405 | + | ||
406 | +[eval] failed | ||
407 | +Number of model parameters: 154706 | ||
408 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
409 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
410 | +Test: [0/7] Time 0.762 (0.762) Loss 0.0016 (0.0016) Prec@1 100.000 (100.000) | ||
411 | +Test: [1/7] Time 0.026 (0.394) Loss 0.0004 (0.0010) Prec@1 100.000 (100.000) | ||
412 | +Test: [2/7] Time 0.023 (0.270) Loss 0.0092 (0.0038) Prec@1 99.219 (99.740) | ||
413 | +Test: [3/7] Time 0.016 (0.207) Loss 0.0048 (0.0040) Prec@1 99.609 (99.707) | ||
414 | +Test: [4/7] Time 0.016 (0.169) Loss 0.0223 (0.0077) Prec@1 99.609 (99.688) | ||
415 | +Test: [5/7] Time 0.021 (0.144) Loss 0.0369 (0.0125) Prec@1 99.609 (99.674) | ||
416 | +Test: [6/7] Time 0.081 (0.135) Loss 0.0006 (0.0114) Prec@1 100.000 (99.704) | ||
417 | + * Prec@1 99.704 | ||
418 | +Creating CAM | ||
419 | +Fatal error in main loop | ||
420 | +Traceback (most recent call last): | ||
421 | + File "test.py", line 171, in main | ||
422 | + run_model(args, q) | ||
423 | + File "test.py", line 249, in run_model | ||
424 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
425 | + File "test.py", line 307, in validate | ||
426 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class']) | ||
427 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 175, in make_grad_cam | ||
428 | + cv2.imwrite(f"eval_results/Error/error_case/check/{image_path.split('/')[-1]}", raw_image) | ||
429 | +cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgcodecs/src/loadsave.cpp:661: error: (-2:Unspecified error) could not find a writer for the specified extension in function 'imwrite_' | ||
430 | + | ||
431 | +[eval] failed | ||
432 | +Number of model parameters: 154706 | ||
433 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
434 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
435 | +Test: [0/7] Time 0.734 (0.734) Loss 0.0066 (0.0066) Prec@1 99.219 (99.219) | ||
436 | +Test: [1/7] Time 0.019 (0.376) Loss 0.0215 (0.0140) Prec@1 99.609 (99.414) | ||
437 | +Test: [2/7] Time 0.015 (0.256) Loss 0.0006 (0.0095) Prec@1 100.000 (99.609) | ||
438 | +Test: [3/7] Time 0.015 (0.196) Loss 0.0031 (0.0079) Prec@1 100.000 (99.707) | ||
439 | +Test: [4/7] Time 0.016 (0.160) Loss 0.0362 (0.0136) Prec@1 99.609 (99.688) | ||
440 | +Test: [5/7] Time 0.016 (0.136) Loss 0.0062 (0.0124) Prec@1 99.609 (99.674) | ||
441 | +Test: [6/7] Time 0.082 (0.128) Loss 0.0024 (0.0114) Prec@1 100.000 (99.704) | ||
442 | + * Prec@1 99.704 | ||
443 | +Creating CAM | ||
444 | +Fatal error in main loop | ||
445 | +Traceback (most recent call last): | ||
446 | + File "test.py", line 171, in main | ||
447 | + run_model(args, q) | ||
448 | + File "test.py", line 249, in run_model | ||
449 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
450 | + File "test.py", line 307, in validate | ||
451 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class']) | ||
452 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 168, in make_grad_cam | ||
453 | + images, raw_images, image_paths = load_images(image_paths, normalize_factor) | ||
454 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 57, in load_images | ||
455 | + del image_path[i] | ||
456 | +TypeError: 'str' object doesn't support item deletion | ||
457 | +[eval] failed | ||
458 | +Number of model parameters: 154706 | ||
459 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
460 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
461 | +Test: [0/7] Time 0.737 (0.737) Loss 0.0036 (0.0036) Prec@1 100.000 (100.000) | ||
462 | +Test: [1/7] Time 0.033 (0.385) Loss 0.0066 (0.0051) Prec@1 99.609 (99.805) | ||
463 | +Test: [2/7] Time 0.016 (0.262) Loss 0.0033 (0.0045) Prec@1 99.609 (99.740) | ||
464 | +Test: [3/7] Time 0.015 (0.200) Loss 0.0010 (0.0036) Prec@1 100.000 (99.805) | ||
465 | +Test: [4/7] Time 0.015 (0.163) Loss 0.0009 (0.0031) Prec@1 100.000 (99.844) | ||
466 | +Test: [5/7] Time 0.015 (0.139) Loss 0.0008 (0.0027) Prec@1 100.000 (99.870) | ||
467 | +Test: [6/7] Time 0.070 (0.129) Loss 0.0976 (0.0114) Prec@1 98.077 (99.704) | ||
468 | + * Prec@1 99.704 | ||
469 | +Creating CAM | ||
470 | +Best accuracy: 100.0 | ||
471 | +[eval] done | ||
472 | +Number of model parameters: 154706 | ||
473 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
474 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
475 | +Test: [0/7] Time 0.751 (0.751) Loss 0.0568 (0.0568) Prec@1 99.219 (99.219) | ||
476 | +Test: [1/7] Time 0.017 (0.384) Loss 0.0015 (0.0292) Prec@1 100.000 (99.609) | ||
477 | +Test: [2/7] Time 0.016 (0.261) Loss 0.0007 (0.0197) Prec@1 100.000 (99.740) | ||
478 | +Test: [3/7] Time 0.016 (0.200) Loss 0.0038 (0.0157) Prec@1 99.609 (99.707) | ||
479 | +Test: [4/7] Time 0.015 (0.163) Loss 0.0016 (0.0129) Prec@1 100.000 (99.766) | ||
480 | +Test: [5/7] Time 0.017 (0.139) Loss 0.0111 (0.0126) Prec@1 99.219 (99.674) | ||
481 | +Test: [6/7] Time 0.104 (0.134) Loss 0.0002 (0.0114) Prec@1 100.000 (99.704) | ||
482 | + * Prec@1 99.704 | ||
483 | +Creating CAM | ||
484 | +Best accuracy: 100.0 | ||
485 | +[eval] done | ||
486 | +Number of model parameters: 154706 | ||
487 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
488 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
489 | +Test: [0/7] Time 0.756 (0.756) Loss 0.0072 (0.0072) Prec@1 99.219 (99.219) | ||
490 | +Test: [1/7] Time 0.018 (0.387) Loss 0.0063 (0.0068) Prec@1 99.609 (99.414) | ||
491 | +Test: [2/7] Time 0.017 (0.264) Loss 0.0576 (0.0237) Prec@1 99.219 (99.349) | ||
492 | +Test: [3/7] Time 0.015 (0.202) Loss 0.0002 (0.0179) Prec@1 100.000 (99.512) | ||
493 | +Test: [4/7] Time 0.015 (0.164) Loss 0.0006 (0.0144) Prec@1 100.000 (99.609) | ||
494 | +Test: [5/7] Time 0.015 (0.139) Loss 0.0027 (0.0125) Prec@1 100.000 (99.674) | ||
495 | +Test: [6/7] Time 0.088 (0.132) Loss 0.0014 (0.0114) Prec@1 100.000 (99.704) | ||
496 | + * Prec@1 99.704 | ||
497 | +Creating CAM | ||
498 | +Best accuracy: 100.0 | ||
499 | +[eval] done | ||
500 | +Number of model parameters: 154706 | ||
501 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
502 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
503 | +Test: [0/7] Time 0.719 (0.719) Loss 0.0020 (0.0020) Prec@1 100.000 (100.000) | ||
504 | +Test: [1/7] Time 0.035 (0.377) Loss 0.0047 (0.0033) Prec@1 99.609 (99.805) | ||
505 | +Test: [2/7] Time 0.016 (0.256) Loss 0.0006 (0.0024) Prec@1 100.000 (99.870) | ||
506 | +Test: [3/7] Time 0.016 (0.196) Loss 0.0359 (0.0108) Prec@1 99.609 (99.805) | ||
507 | +Test: [4/7] Time 0.015 (0.160) Loss 0.0012 (0.0089) Prec@1 100.000 (99.844) | ||
508 | +Test: [5/7] Time 0.016 (0.136) Loss 0.0071 (0.0086) Prec@1 99.609 (99.805) | ||
509 | +Test: [6/7] Time 0.093 (0.130) Loss 0.0396 (0.0114) Prec@1 98.718 (99.704) | ||
510 | + * Prec@1 99.704 | ||
511 | +Creating CAM | ||
512 | +Best accuracy: 100.0 | ||
513 | +[eval] done | ||
514 | +Number of model parameters: 154706 | ||
515 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
516 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
517 | +Test: [0/7] Time 0.746 (0.746) Loss 0.0003 (0.0003) Prec@1 100.000 (100.000) | ||
518 | +Test: [1/7] Time 0.026 (0.386) Loss 0.0004 (0.0004) Prec@1 100.000 (100.000) | ||
519 | +Test: [2/7] Time 0.032 (0.268) Loss 0.0264 (0.0091) Prec@1 99.219 (99.740) | ||
520 | +Test: [3/7] Time 0.017 (0.205) Loss 0.0414 (0.0171) Prec@1 99.219 (99.609) | ||
521 | +Test: [4/7] Time 0.015 (0.167) Loss 0.0030 (0.0143) Prec@1 100.000 (99.688) | ||
522 | +Test: [5/7] Time 0.020 (0.142) Loss 0.0038 (0.0126) Prec@1 99.609 (99.674) | ||
523 | +Test: [6/7] Time 0.079 (0.133) Loss 0.0005 (0.0114) Prec@1 100.000 (99.704) | ||
524 | + * Prec@1 99.704 | ||
525 | +Creating CAM | ||
526 | +Best accuracy: 100.0 | ||
527 | +[eval] done | ||
528 | +Number of model parameters: 154706 | ||
529 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
530 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
531 | +Test: [0/7] Time 0.781 (0.781) Loss 0.0231 (0.0231) Prec@1 99.609 (99.609) | ||
532 | +Test: [1/7] Time 0.019 (0.400) Loss 0.0072 (0.0152) Prec@1 99.219 (99.414) | ||
533 | +Test: [2/7] Time 0.016 (0.272) Loss 0.0004 (0.0102) Prec@1 100.000 (99.609) | ||
534 | +Test: [3/7] Time 0.015 (0.208) Loss 0.0014 (0.0080) Prec@1 100.000 (99.707) | ||
535 | +Test: [4/7] Time 0.016 (0.169) Loss 0.0062 (0.0077) Prec@1 99.609 (99.688) | ||
536 | +Test: [5/7] Time 0.019 (0.144) Loss 0.0005 (0.0065) Prec@1 100.000 (99.740) | ||
537 | +Test: [6/7] Time 0.095 (0.137) Loss 0.0605 (0.0114) Prec@1 99.359 (99.704) | ||
538 | + * Prec@1 99.704 | ||
539 | +Creating CAM | ||
540 | +Best accuracy: 100.0 | ||
541 | +[eval] done | ||
542 | +Number of model parameters: 154706 | ||
543 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
544 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
545 | +Test: [0/7] Time 0.758 (0.758) Loss 0.0105 (0.0105) Prec@1 99.219 (99.219) | ||
546 | +Test: [1/7] Time 0.017 (0.388) Loss 0.0006 (0.0056) Prec@1 100.000 (99.609) | ||
547 | +Test: [2/7] Time 0.017 (0.264) Loss 0.0216 (0.0109) Prec@1 99.609 (99.609) | ||
548 | +Test: [3/7] Time 0.016 (0.202) Loss 0.0360 (0.0172) Prec@1 99.609 (99.609) | ||
549 | +Test: [4/7] Time 0.016 (0.165) Loss 0.0015 (0.0140) Prec@1 100.000 (99.688) | ||
550 | +Test: [5/7] Time 0.016 (0.140) Loss 0.0048 (0.0125) Prec@1 99.609 (99.674) | ||
551 | +Test: [6/7] Time 0.077 (0.131) Loss 0.0011 (0.0114) Prec@1 100.000 (99.704) | ||
552 | + * Prec@1 99.704 | ||
553 | +Creating CAM | ||
554 | +Fatal error in main loop | ||
555 | +Traceback (most recent call last): | ||
556 | + File "test.py", line 171, in main | ||
557 | + run_model(args, q) | ||
558 | + File "test.py", line 249, in run_model | ||
559 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
560 | + File "test.py", line 307, in validate | ||
561 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class']) | ||
562 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 163, in make_grad_cam | ||
563 | + image_paths.remove('cam') | ||
564 | +ValueError: list.remove(x): x not in list | ||
565 | +[eval] failed | ||
566 | +Number of model parameters: 154706 | ||
567 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
568 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
569 | +Test: [0/7] Time 0.812 (0.812) Loss 0.0411 (0.0411) Prec@1 99.219 (99.219) | ||
570 | +Test: [1/7] Time 0.020 (0.416) Loss 0.0036 (0.0223) Prec@1 99.609 (99.414) | ||
571 | +Test: [2/7] Time 0.016 (0.283) Loss 0.0016 (0.0154) Prec@1 100.000 (99.609) | ||
572 | +Test: [3/7] Time 0.016 (0.216) Loss 0.0066 (0.0132) Prec@1 99.609 (99.609) | ||
573 | +Test: [4/7] Time 0.019 (0.177) Loss 0.0003 (0.0106) Prec@1 100.000 (99.688) | ||
574 | +Test: [5/7] Time 0.017 (0.150) Loss 0.0005 (0.0089) Prec@1 100.000 (99.740) | ||
575 | +Test: [6/7] Time 0.088 (0.141) Loss 0.0362 (0.0114) Prec@1 99.359 (99.704) | ||
576 | + * Prec@1 99.704 | ||
577 | +Creating CAM | ||
578 | +Fatal error in main loop | ||
579 | +Traceback (most recent call last): | ||
580 | + File "test.py", line 171, in main | ||
581 | + run_model(args, q) | ||
582 | + File "test.py", line 249, in run_model | ||
583 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
584 | + File "test.py", line 307, in validate | ||
585 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class']) | ||
586 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 165, in make_grad_cam | ||
587 | + images, raw_images, _ = load_images(image_paths, normalize_factor) | ||
588 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 54, in load_images | ||
589 | + image, raw_image = preprocess(image_path, normalize_factor) | ||
590 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 78, in preprocess | ||
591 | + raw_image = cv2.resize(raw_image, (128,) * 2) | ||
592 | +cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgproc/src/resize.cpp:3784: error: (-215:Assertion failed) !ssize.empty() in function 'resize' | ||
593 | + | ||
594 | +[eval] failed | ||
595 | +Number of model parameters: 154706 | ||
596 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
597 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
598 | +Test: [0/7] Time 0.748 (0.748) Loss 0.0066 (0.0066) Prec@1 99.609 (99.609) | ||
599 | +Test: [1/7] Time 0.018 (0.383) Loss 0.0003 (0.0035) Prec@1 100.000 (99.805) | ||
600 | +Test: [2/7] Time 0.016 (0.261) Loss 0.0050 (0.0040) Prec@1 99.609 (99.740) | ||
601 | +Test: [3/7] Time 0.017 (0.200) Loss 0.0385 (0.0126) Prec@1 99.219 (99.609) | ||
602 | +Test: [4/7] Time 0.015 (0.163) Loss 0.0019 (0.0105) Prec@1 100.000 (99.688) | ||
603 | +Test: [5/7] Time 0.016 (0.138) Loss 0.0230 (0.0126) Prec@1 99.609 (99.674) | ||
604 | +Test: [6/7] Time 0.081 (0.130) Loss 0.0005 (0.0114) Prec@1 100.000 (99.704) | ||
605 | + * Prec@1 99.704 | ||
606 | +Creating CAM | ||
607 | +Best accuracy: 100.0 | ||
608 | +[eval] done | ||
609 | +Number of model parameters: 154706 | ||
610 | +=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
611 | +=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
612 | +Test: [0/7] Time 0.748 (0.748) Loss 0.0002 (0.0002) Prec@1 100.000 (100.000) | ||
613 | +Test: [1/7] Time 0.036 (0.392) Loss 0.0034 (0.0018) Prec@1 99.609 (99.805) | ||
614 | +Test: [2/7] Time 0.016 (0.266) Loss 0.0005 (0.0014) Prec@1 100.000 (99.870) | ||
615 | +Test: [3/7] Time 0.017 (0.204) Loss 0.0267 (0.0077) Prec@1 99.219 (99.707) | ||
616 | +Test: [4/7] Time 0.016 (0.167) Loss 0.0077 (0.0077) Prec@1 99.609 (99.688) | ||
617 | +Test: [5/7] Time 0.016 (0.141) Loss 0.0367 (0.0125) Prec@1 99.609 (99.674) | ||
618 | +Test: [6/7] Time 0.091 (0.134) Loss 0.0006 (0.0114) Prec@1 100.000 (99.704) | ||
619 | + * Prec@1 99.704 | ||
620 | +Creating CAM | ||
621 | +Best accuracy: 100.0 | ||
622 | +[eval] done | ||
623 | +Number of model parameters: 318810 | ||
624 | +=> loading checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/best_model.pth.tar' | ||
625 | +Fatal error in main loop | ||
626 | +Traceback (most recent call last): | ||
627 | + File "test.py", line 171, in main | ||
628 | + run_model(args, q) | ||
629 | + File "test.py", line 235, in run_model | ||
630 | + checkpoint = torch.load(args['checkpoint']) | ||
631 | + File "/usr/local/lib/python3.6/dist-packages/torch/serialization.py", line 382, in load | ||
632 | + f = open(f, 'rb') | ||
633 | +FileNotFoundError: [Errno 2] No such file or directory: 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/best_model.pth.tar' | ||
634 | +[eval] failed | ||
635 | +Number of model parameters: 154706 | ||
636 | +=> loading checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
637 | +=> loaded checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2348) | ||
638 | +Test: [0/2] Time 1.785 (1.785) Loss 6.5358 (6.5358) Prec@1 67.969 (67.969) | ||
639 | +Test: [1/2] Time 0.281 (1.033) Loss 6.3798 (6.4966) Prec@1 68.605 (68.129) | ||
640 | + * Prec@1 68.129 | ||
641 | +Creating CAM | ||
642 | +Best accuracy: 95.3216364899574 | ||
643 | +[eval] done | ||
644 | +Number of model parameters: 154706 | ||
645 | +=> loading checkpoint 'output/Error/25039_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
646 | +=> loaded checkpoint 'output/Error/25039_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 229) | ||
647 | +Test: [0/2] Time 1.178 (1.178) Loss 0.3317 (0.3317) Prec@1 89.062 (89.062) | ||
648 | +Test: [1/2] Time 0.077 (0.627) Loss 0.3194 (0.3286) Prec@1 87.209 (88.596) | ||
649 | + * Prec@1 88.596 | ||
650 | +Creating CAM | ||
651 | +Best accuracy: 88.59649033574333 | ||
652 | +[eval] done | ||
653 | +Number of model parameters: 154706 | ||
654 | +Number of model parameters: 154706 | ||
655 | +=> loading checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
656 | +=> loaded checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 128) | ||
657 | +Test: [0/2] Time 1.903 (1.903) Loss 22.3839 (22.3839) Prec@1 66.016 (66.016) | ||
658 | +Test: [1/2] Time 0.066 (0.985) Loss 16.9735 (21.0234) Prec@1 74.419 (68.129) | ||
659 | + * Prec@1 68.129 | ||
660 | +Creating CAM | ||
661 | +Best accuracy: 94.7368419267978 | ||
662 | +[eval] done | ||
663 | +Number of model parameters: 154706 | ||
664 | +=> loading checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
665 | +=> loaded checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 302) | ||
666 | +Test: [0/2] Time 1.783 (1.783) Loss 0.1690 (0.1690) Prec@1 94.922 (94.922) | ||
667 | +Test: [1/2] Time 0.047 (0.915) Loss 0.1477 (0.1637) Prec@1 96.512 (95.322) | ||
668 | + * Prec@1 95.322 | ||
669 | +Creating CAM | ||
670 | +Best accuracy: 95.32163724843521 | ||
671 | +[eval] done | ||
672 | +Number of model parameters: 154706 | ||
673 | +=> loading checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
674 | +=> loaded checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 2021) | ||
675 | +Test: [0/14] Time 1.783 (1.783) Loss 0.1130 (0.1130) Prec@1 94.922 (94.922) | ||
676 | +Test: [1/14] Time 0.127 (0.955) Loss 0.1829 (0.1479) Prec@1 91.016 (92.969) | ||
677 | +Test: [2/14] Time 0.023 (0.644) Loss 0.1316 (0.1425) Prec@1 94.531 (93.490) | ||
678 | +Test: [3/14] Time 0.022 (0.489) Loss 0.1273 (0.1387) Prec@1 95.312 (93.945) | ||
679 | +Test: [4/14] Time 0.036 (0.398) Loss 0.1953 (0.1500) Prec@1 90.234 (93.203) | ||
680 | +Test: [5/14] Time 0.029 (0.337) Loss 0.1562 (0.1510) Prec@1 93.359 (93.229) | ||
681 | +Test: [6/14] Time 0.023 (0.292) Loss 0.1281 (0.1478) Prec@1 94.922 (93.471) | ||
682 | +Test: [7/14] Time 0.032 (0.259) Loss 0.2017 (0.1545) Prec@1 91.797 (93.262) | ||
683 | +Test: [8/14] Time 0.024 (0.233) Loss 0.1373 (0.1526) Prec@1 94.531 (93.403) | ||
684 | +Test: [9/14] Time 0.029 (0.213) Loss 0.1353 (0.1509) Prec@1 94.531 (93.516) | ||
685 | +Test: [10/14] Time 0.018 (0.195) Loss 0.1153 (0.1476) Prec@1 96.094 (93.750) | ||
686 | +Test: [11/14] Time 0.046 (0.183) Loss 0.1625 (0.1489) Prec@1 92.578 (93.652) | ||
687 | +Test: [12/14] Time 0.024 (0.170) Loss 0.1584 (0.1496) Prec@1 94.141 (93.690) | ||
688 | +Test: [13/14] Time 0.098 (0.165) Loss 0.1581 (0.1498) Prec@1 92.473 (93.657) | ||
689 | + * Prec@1 93.657 | ||
690 | +Creating CAM | ||
691 | +Best accuracy: 95.90643123158237 | ||
692 | +[eval] done | ||
693 | +Number of model parameters: 154706 | ||
694 | +=> loading checkpoint 'output/Error/39852_model=Efficientnet-ep=3000-block=4/checkpoint.pth.tar' | ||
695 | +Fatal error in main loop | ||
696 | +Traceback (most recent call last): | ||
697 | + File "test.py", line 174, in main | ||
698 | + run_model(args, q) | ||
699 | + File "test.py", line 241, in run_model | ||
700 | + model.load_state_dict(checkpoint['state_dict']) | ||
701 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 777, in load_state_dict | ||
702 | + self.__class__.__name__, "\n\t".join(error_msgs))) | ||
703 | +RuntimeError: Error(s) in loading state_dict for DataParallel: | ||
704 | + Missing key(s) in state_dict: "module.features.0.0.weight", "module.features.0.1.weight", "module.features.0.1.bias", "module.features.0.1.running_mean", "module.features.0.1.running_var", "module.features.1.conv.0.weight", "module.features.1.conv.1.weight", "module.features.1.conv.1.bias", "module.features.1.conv.1.running_mean", "module.features.1.conv.1.running_var", "module.features.1.conv.3.weight", "module.features.1.conv.4.weight", "module.features.1.conv.4.bias", "module.features.1.conv.4.running_mean", "module.features.1.conv.4.running_var", "module.features.1.conv.5.fc.0.weight", "module.features.1.conv.5.fc.2.weight", "module.features.1.conv.7.weight", "module.features.1.conv.8.weight", "module.features.1.conv.8.bias", "module.features.1.conv.8.running_mean", "module.features.1.conv.8.running_var", "module.features.2.conv.0.weight", "module.features.2.conv.1.weight", "module.features.2.conv.1.bias", "module.features.2.conv.1.running_mean", "module.features.2.conv.1.running_var", "module.features.2.conv.3.weight", "module.features.2.conv.4.weight", "module.features.2.conv.4.bias", "module.features.2.conv.4.running_mean", "module.features.2.conv.4.running_var", "module.features.2.conv.7.weight", "module.features.2.conv.8.weight", "module.features.2.conv.8.bias", "module.features.2.conv.8.running_mean", "module.features.2.conv.8.running_var", "module.features.3.conv.0.weight", "module.features.3.conv.1.weight", "module.features.3.conv.1.bias", "module.features.3.conv.1.running_mean", "module.features.3.conv.1.running_var", "module.features.3.conv.3.weight", "module.features.3.conv.4.weight", "module.features.3.conv.4.bias", "module.features.3.conv.4.running_mean", "module.features.3.conv.4.running_var", "module.features.3.conv.7.weight", "module.features.3.conv.8.weight", "module.features.3.conv.8.bias", "module.features.3.conv.8.running_mean", "module.features.3.conv.8.running_var", "module.features.4.conv.0.weight", 
"module.features.4.conv.1.weight", "module.features.4.conv.1.bias", "module.features.4.conv.1.running_mean", "module.features.4.conv.1.running_var", "module.features.4.conv.3.weight", "module.features.4.conv.4.weight", "module.features.4.conv.4.bias", "module.features.4.conv.4.running_mean", "module.features.4.conv.4.running_var", "module.features.4.conv.5.fc.0.weight", "module.features.4.conv.5.fc.2.weight", "module.features.4.conv.7.weight", "module.features.4.conv.8.weight", "module.features.4.conv.8.bias", "module.features.4.conv.8.running_mean", "module.features.4.conv.8.running_var", "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.7.weight", "module.features.7.bias", "module.classifier.1.weight", "module.classifier.1.bias". | ||
705 | + Unexpected key(s) in state_dict: "module._conv_stem.weight", "module._bn0.weight", "module._bn0.bias", "module._bn0.running_mean", "module._bn0.running_var", "module._bn0.num_batches_tracked", "module._blocks.0._depthwise_conv.weight", "module._blocks.0._bn1.weight", "module._blocks.0._bn1.bias", "module._blocks.0._bn1.running_mean", "module._blocks.0._bn1.running_var", "module._blocks.0._bn1.num_batches_tracked", "module._blocks.0._se_reduce.weight", "module._blocks.0._se_reduce.bias", "module._blocks.0._se_expand.weight", "module._blocks.0._se_expand.bias", "module._blocks.0._project_conv.weight", "module._blocks.0._bn2.weight", "module._blocks.0._bn2.bias", "module._blocks.0._bn2.running_mean", "module._blocks.0._bn2.running_var", "module._blocks.0._bn2.num_batches_tracked", "module._blocks.1._expand_conv.weight", "module._blocks.1._bn0.weight", "module._blocks.1._bn0.bias", "module._blocks.1._bn0.running_mean", "module._blocks.1._bn0.running_var", "module._blocks.1._bn0.num_batches_tracked", "module._blocks.1._depthwise_conv.weight", "module._blocks.1._bn1.weight", "module._blocks.1._bn1.bias", "module._blocks.1._bn1.running_mean", "module._blocks.1._bn1.running_var", "module._blocks.1._bn1.num_batches_tracked", "module._blocks.1._se_reduce.weight", "module._blocks.1._se_reduce.bias", "module._blocks.1._se_expand.weight", "module._blocks.1._se_expand.bias", "module._blocks.1._project_conv.weight", "module._blocks.1._bn2.weight", "module._blocks.1._bn2.bias", "module._blocks.1._bn2.running_mean", "module._blocks.1._bn2.running_var", "module._blocks.1._bn2.num_batches_tracked", "module._blocks.2._expand_conv.weight", "module._blocks.2._bn0.weight", "module._blocks.2._bn0.bias", "module._blocks.2._bn0.running_mean", "module._blocks.2._bn0.running_var", "module._blocks.2._bn0.num_batches_tracked", "module._blocks.2._depthwise_conv.weight", "module._blocks.2._bn1.weight", "module._blocks.2._bn1.bias", "module._blocks.2._bn1.running_mean", 
"module._blocks.2._bn1.running_var", "module._blocks.2._bn1.num_batches_tracked", "module._blocks.2._se_reduce.weight", "module._blocks.2._se_reduce.bias", "module._blocks.2._se_expand.weight", "module._blocks.2._se_expand.bias", "module._blocks.2._project_conv.weight", "module._blocks.2._bn2.weight", "module._blocks.2._bn2.bias", "module._blocks.2._bn2.running_mean", "module._blocks.2._bn2.running_var", "module._blocks.2._bn2.num_batches_tracked", "module._blocks.3._expand_conv.weight", "module._blocks.3._bn0.weight", "module._blocks.3._bn0.bias", "module._blocks.3._bn0.running_mean", "module._blocks.3._bn0.running_var", "module._blocks.3._bn0.num_batches_tracked", "module._blocks.3._depthwise_conv.weight", "module._blocks.3._bn1.weight", "module._blocks.3._bn1.bias", "module._blocks.3._bn1.running_mean", "module._blocks.3._bn1.running_var", "module._blocks.3._bn1.num_batches_tracked", "module._blocks.3._se_reduce.weight", "module._blocks.3._se_reduce.bias", "module._blocks.3._se_expand.weight", "module._blocks.3._se_expand.bias", "module._blocks.3._project_conv.weight", "module._blocks.3._bn2.weight", "module._blocks.3._bn2.bias", "module._blocks.3._bn2.running_mean", "module._blocks.3._bn2.running_var", "module._blocks.3._bn2.num_batches_tracked", "module._blocks.4._expand_conv.weight", "module._blocks.4._bn0.weight", "module._blocks.4._bn0.bias", "module._blocks.4._bn0.running_mean", "module._blocks.4._bn0.running_var", "module._blocks.4._bn0.num_batches_tracked", "module._blocks.4._depthwise_conv.weight", "module._blocks.4._bn1.weight", "module._blocks.4._bn1.bias", "module._blocks.4._bn1.running_mean", "module._blocks.4._bn1.running_var", "module._blocks.4._bn1.num_batches_tracked", "module._blocks.4._se_reduce.weight", "module._blocks.4._se_reduce.bias", "module._blocks.4._se_expand.weight", "module._blocks.4._se_expand.bias", "module._blocks.4._project_conv.weight", "module._blocks.4._bn2.weight", "module._blocks.4._bn2.bias", 
"module._blocks.4._bn2.running_mean", "module._blocks.4._bn2.running_var", "module._blocks.4._bn2.num_batches_tracked", "module._blocks.5._expand_conv.weight", "module._blocks.5._bn0.weight", "module._blocks.5._bn0.bias", "module._blocks.5._bn0.running_mean", "module._blocks.5._bn0.running_var", "module._blocks.5._bn0.num_batches_tracked", "module._blocks.5._depthwise_conv.weight", "module._blocks.5._bn1.weight", "module._blocks.5._bn1.bias", "module._blocks.5._bn1.running_mean", "module._blocks.5._bn1.running_var", "module._blocks.5._bn1.num_batches_tracked", "module._blocks.5._se_reduce.weight", "module._blocks.5._se_reduce.bias", "module._blocks.5._se_expand.weight", "module._blocks.5._se_expand.bias", "module._blocks.5._project_conv.weight", "module._blocks.5._bn2.weight", "module._blocks.5._bn2.bias", "module._blocks.5._bn2.running_mean", "module._blocks.5._bn2.running_var", "module._blocks.5._bn2.num_batches_tracked", "module._blocks.6._expand_conv.weight", "module._blocks.6._bn0.weight", "module._blocks.6._bn0.bias", "module._blocks.6._bn0.running_mean", "module._blocks.6._bn0.running_var", "module._blocks.6._bn0.num_batches_tracked", "module._blocks.6._depthwise_conv.weight", "module._blocks.6._bn1.weight", "module._blocks.6._bn1.bias", "module._blocks.6._bn1.running_mean", "module._blocks.6._bn1.running_var", "module._blocks.6._bn1.num_batches_tracked", "module._blocks.6._se_reduce.weight", "module._blocks.6._se_reduce.bias", "module._blocks.6._se_expand.weight", "module._blocks.6._se_expand.bias", "module._blocks.6._project_conv.weight", "module._blocks.6._bn2.weight", "module._blocks.6._bn2.bias", "module._blocks.6._bn2.running_mean", "module._blocks.6._bn2.running_var", "module._blocks.6._bn2.num_batches_tracked", "module._blocks.7._expand_conv.weight", "module._blocks.7._bn0.weight", "module._blocks.7._bn0.bias", "module._blocks.7._bn0.running_mean", "module._blocks.7._bn0.running_var", "module._blocks.7._bn0.num_batches_tracked", 
"module._blocks.7._depthwise_conv.weight", "module._blocks.7._bn1.weight", "module._blocks.7._bn1.bias", "module._blocks.7._bn1.running_mean", "module._blocks.7._bn1.running_var", "module._blocks.7._bn1.num_batches_tracked", "module._blocks.7._se_reduce.weight", "module._blocks.7._se_reduce.bias", "module._blocks.7._se_expand.weight", "module._blocks.7._se_expand.bias", "module._blocks.7._project_conv.weight", "module._blocks.7._bn2.weight", "module._blocks.7._bn2.bias", "module._blocks.7._bn2.running_mean", "module._blocks.7._bn2.running_var", "module._blocks.7._bn2.num_batches_tracked", "module._blocks.8._expand_conv.weight", "module._blocks.8._bn0.weight", "module._blocks.8._bn0.bias", "module._blocks.8._bn0.running_mean", "module._blocks.8._bn0.running_var", "module._blocks.8._bn0.num_batches_tracked", "module._blocks.8._depthwise_conv.weight", "module._blocks.8._bn1.weight", "module._blocks.8._bn1.bias", "module._blocks.8._bn1.running_mean", "module._blocks.8._bn1.running_var", "module._blocks.8._bn1.num_batches_tracked", "module._blocks.8._se_reduce.weight", "module._blocks.8._se_reduce.bias", "module._blocks.8._se_expand.weight", "module._blocks.8._se_expand.bias", "module._blocks.8._project_conv.weight", "module._blocks.8._bn2.weight", "module._blocks.8._bn2.bias", "module._blocks.8._bn2.running_mean", "module._blocks.8._bn2.running_var", "module._blocks.8._bn2.num_batches_tracked", "module._blocks.9._expand_conv.weight", "module._blocks.9._bn0.weight", "module._blocks.9._bn0.bias", "module._blocks.9._bn0.running_mean", "module._blocks.9._bn0.running_var", "module._blocks.9._bn0.num_batches_tracked", "module._blocks.9._depthwise_conv.weight", "module._blocks.9._bn1.weight", "module._blocks.9._bn1.bias", "module._blocks.9._bn1.running_mean", "module._blocks.9._bn1.running_var", "module._blocks.9._bn1.num_batches_tracked", "module._blocks.9._se_reduce.weight", "module._blocks.9._se_reduce.bias", "module._blocks.9._se_expand.weight", 
"module._blocks.9._se_expand.bias", "module._blocks.9._project_conv.weight", "module._blocks.9._bn2.weight", "module._blocks.9._bn2.bias", "module._blocks.9._bn2.running_mean", "module._blocks.9._bn2.running_var", "module._blocks.9._bn2.num_batches_tracked", "module._blocks.10._expand_conv.weight", "module._blocks.10._bn0.weight", "module._blocks.10._bn0.bias", "module._blocks.10._bn0.running_mean", "module._blocks.10._bn0.running_var", "module._blocks.10._bn0.num_batches_tracked", "module._blocks.10._depthwise_conv.weight", "module._blocks.10._bn1.weight", "module._blocks.10._bn1.bias", "module._blocks.10._bn1.running_mean", "module._blocks.10._bn1.running_var", "module._blocks.10._bn1.num_batches_tracked", "module._blocks.10._se_reduce.weight", "module._blocks.10._se_reduce.bias", "module._blocks.10._se_expand.weight", "module._blocks.10._se_expand.bias", "module._blocks.10._project_conv.weight", "module._blocks.10._bn2.weight", "module._blocks.10._bn2.bias", "module._blocks.10._bn2.running_mean", "module._blocks.10._bn2.running_var", "module._blocks.10._bn2.num_batches_tracked", "module._blocks.11._expand_conv.weight", "module._blocks.11._bn0.weight", "module._blocks.11._bn0.bias", "module._blocks.11._bn0.running_mean", "module._blocks.11._bn0.running_var", "module._blocks.11._bn0.num_batches_tracked", "module._blocks.11._depthwise_conv.weight", "module._blocks.11._bn1.weight", "module._blocks.11._bn1.bias", "module._blocks.11._bn1.running_mean", "module._blocks.11._bn1.running_var", "module._blocks.11._bn1.num_batches_tracked", "module._blocks.11._se_reduce.weight", "module._blocks.11._se_reduce.bias", "module._blocks.11._se_expand.weight", "module._blocks.11._se_expand.bias", "module._blocks.11._project_conv.weight", "module._blocks.11._bn2.weight", "module._blocks.11._bn2.bias", "module._blocks.11._bn2.running_mean", "module._blocks.11._bn2.running_var", "module._blocks.11._bn2.num_batches_tracked", "module._blocks.12._expand_conv.weight", 
"module._blocks.12._bn0.weight", "module._blocks.12._bn0.bias", "module._blocks.12._bn0.running_mean", "module._blocks.12._bn0.running_var", "module._blocks.12._bn0.num_batches_tracked", "module._blocks.12._depthwise_conv.weight", "module._blocks.12._bn1.weight", "module._blocks.12._bn1.bias", "module._blocks.12._bn1.running_mean", "module._blocks.12._bn1.running_var", "module._blocks.12._bn1.num_batches_tracked", "module._blocks.12._se_reduce.weight", "module._blocks.12._se_reduce.bias", "module._blocks.12._se_expand.weight", "module._blocks.12._se_expand.bias", "module._blocks.12._project_conv.weight", "module._blocks.12._bn2.weight", "module._blocks.12._bn2.bias", "module._blocks.12._bn2.running_mean", "module._blocks.12._bn2.running_var", "module._blocks.12._bn2.num_batches_tracked", "module._blocks.13._expand_conv.weight", "module._blocks.13._bn0.weight", "module._blocks.13._bn0.bias", "module._blocks.13._bn0.running_mean", "module._blocks.13._bn0.running_var", "module._blocks.13._bn0.num_batches_tracked", "module._blocks.13._depthwise_conv.weight", "module._blocks.13._bn1.weight", "module._blocks.13._bn1.bias", "module._blocks.13._bn1.running_mean", "module._blocks.13._bn1.running_var", "module._blocks.13._bn1.num_batches_tracked", "module._blocks.13._se_reduce.weight", "module._blocks.13._se_reduce.bias", "module._blocks.13._se_expand.weight", "module._blocks.13._se_expand.bias", "module._blocks.13._project_conv.weight", "module._blocks.13._bn2.weight", "module._blocks.13._bn2.bias", "module._blocks.13._bn2.running_mean", "module._blocks.13._bn2.running_var", "module._blocks.13._bn2.num_batches_tracked", "module._blocks.14._expand_conv.weight", "module._blocks.14._bn0.weight", "module._blocks.14._bn0.bias", "module._blocks.14._bn0.running_mean", "module._blocks.14._bn0.running_var", "module._blocks.14._bn0.num_batches_tracked", "module._blocks.14._depthwise_conv.weight", "module._blocks.14._bn1.weight", "module._blocks.14._bn1.bias", 
"module._blocks.14._bn1.running_mean", "module._blocks.14._bn1.running_var", "module._blocks.14._bn1.num_batches_tracked", "module._blocks.14._se_reduce.weight", "module._blocks.14._se_reduce.bias", "module._blocks.14._se_expand.weight", "module._blocks.14._se_expand.bias", "module._blocks.14._project_conv.weight", "module._blocks.14._bn2.weight", "module._blocks.14._bn2.bias", "module._blocks.14._bn2.running_mean", "module._blocks.14._bn2.running_var", "module._blocks.14._bn2.num_batches_tracked", "module._blocks.15._expand_conv.weight", "module._blocks.15._bn0.weight", "module._blocks.15._bn0.bias", "module._blocks.15._bn0.running_mean", "module._blocks.15._bn0.running_var", "module._blocks.15._bn0.num_batches_tracked", "module._blocks.15._depthwise_conv.weight", "module._blocks.15._bn1.weight", "module._blocks.15._bn1.bias", "module._blocks.15._bn1.running_mean", "module._blocks.15._bn1.running_var", "module._blocks.15._bn1.num_batches_tracked", "module._blocks.15._se_reduce.weight", "module._blocks.15._se_reduce.bias", "module._blocks.15._se_expand.weight", "module._blocks.15._se_expand.bias", "module._blocks.15._project_conv.weight", "module._blocks.15._bn2.weight", "module._blocks.15._bn2.bias", "module._blocks.15._bn2.running_mean", "module._blocks.15._bn2.running_var", "module._blocks.15._bn2.num_batches_tracked", "module._conv_head.weight", "module._bn1.weight", "module._bn1.bias", "module._bn1.running_mean", "module._bn1.running_var", "module._bn1.num_batches_tracked", "module._fc.weight", "module._fc.bias". | ||
706 | +[eval] failed | ||
707 | +Number of model parameters: 4009534 | ||
708 | +=> loading checkpoint 'output/Error/39852_model=Efficientnet-ep=3000-block=4/checkpoint.pth.tar' | ||
709 | +=> loaded checkpoint 'output/Error/39852_model=Efficientnet-ep=3000-block=4/checkpoint.pth.tar' (epoch 60) | ||
710 | +Test: [0/214] Time 11.201 (11.201) Loss 0.2607 (0.2607) Prec@1 87.500 (87.500) | ||
711 | +Test: [1/214] Time 0.067 (5.634) Loss 0.1578 (0.2093) Prec@1 100.000 (93.750) | ||
712 | +Test: [2/214] Time 0.046 (3.771) Loss 0.1060 (0.1748) Prec@1 100.000 (95.833) | ||
713 | +Test: [3/214] Time 0.061 (2.844) Loss 0.3034 (0.2070) Prec@1 81.250 (92.188) | ||
714 | +Test: [4/214] Time 0.087 (2.292) Loss 0.1685 (0.1993) Prec@1 87.500 (91.250) | ||
715 | +Test: [5/214] Time 0.091 (1.925) Loss 0.1480 (0.1907) Prec@1 93.750 (91.667) | ||
716 | +Test: [6/214] Time 0.044 (1.657) Loss 0.1116 (0.1794) Prec@1 93.750 (91.964) | ||
717 | +Test: [7/214] Time 0.045 (1.455) Loss 0.2224 (0.1848) Prec@1 87.500 (91.406) | ||
718 | +Test: [8/214] Time 0.132 (1.308) Loss 0.2058 (0.1871) Prec@1 93.750 (91.667) | ||
719 | +Test: [9/214] Time 0.082 (1.186) Loss 0.4716 (0.2156) Prec@1 75.000 (90.000) | ||
720 | +Test: [10/214] Time 0.131 (1.090) Loss 0.9066 (0.2784) Prec@1 50.000 (86.364) | ||
721 | +Test: [11/214] Time 0.119 (1.009) Loss 0.3244 (0.2822) Prec@1 75.000 (85.417) | ||
722 | +Test: [12/214] Time 0.097 (0.939) Loss 0.4440 (0.2947) Prec@1 81.250 (85.096) | ||
723 | +Test: [13/214] Time 0.056 (0.876) Loss 0.2533 (0.2917) Prec@1 87.500 (85.268) | ||
724 | +Test: [14/214] Time 0.050 (0.821) Loss 0.4654 (0.3033) Prec@1 68.750 (84.167) | ||
725 | +Test: [15/214] Time 0.063 (0.773) Loss 0.1495 (0.2937) Prec@1 93.750 (84.766) | ||
726 | +Test: [16/214] Time 0.096 (0.733) Loss 0.2305 (0.2900) Prec@1 93.750 (85.294) | ||
727 | +Test: [17/214] Time 0.095 (0.698) Loss 0.2496 (0.2877) Prec@1 87.500 (85.417) | ||
728 | +Test: [18/214] Time 0.135 (0.668) Loss 0.3048 (0.2886) Prec@1 75.000 (84.868) | ||
729 | +Test: [19/214] Time 0.158 (0.643) Loss 0.3770 (0.2930) Prec@1 87.500 (85.000) | ||
730 | +Test: [20/214] Time 0.081 (0.616) Loss 0.3288 (0.2947) Prec@1 87.500 (85.119) | ||
731 | +Test: [21/214] Time 0.042 (0.590) Loss 0.1715 (0.2891) Prec@1 93.750 (85.511) | ||
732 | +Test: [22/214] Time 0.044 (0.566) Loss 0.4496 (0.2961) Prec@1 81.250 (85.326) | ||
733 | +Test: [23/214] Time 0.053 (0.545) Loss 0.1729 (0.2910) Prec@1 93.750 (85.677) | ||
734 | +Test: [24/214] Time 0.078 (0.526) Loss 0.0676 (0.2820) Prec@1 100.000 (86.250) | ||
735 | +Test: [25/214] Time 0.120 (0.511) Loss 0.4611 (0.2889) Prec@1 81.250 (86.058) | ||
736 | +Test: [26/214] Time 0.095 (0.495) Loss 0.2551 (0.2877) Prec@1 87.500 (86.111) | ||
737 | +Test: [27/214] Time 0.086 (0.481) Loss 0.3308 (0.2892) Prec@1 81.250 (85.938) | ||
738 | +Test: [28/214] Time 0.073 (0.466) Loss 0.2250 (0.2870) Prec@1 87.500 (85.991) | ||
739 | +Test: [29/214] Time 0.065 (0.453) Loss 0.5208 (0.2948) Prec@1 62.500 (85.208) | ||
740 | +Test: [30/214] Time 0.053 (0.440) Loss 0.4636 (0.3002) Prec@1 75.000 (84.879) | ||
741 | +Test: [31/214] Time 0.056 (0.428) Loss 0.3666 (0.3023) Prec@1 75.000 (84.570) | ||
742 | +Test: [32/214] Time 0.050 (0.417) Loss 0.2060 (0.2994) Prec@1 87.500 (84.659) | ||
743 | +Test: [33/214] Time 0.098 (0.407) Loss 0.1164 (0.2940) Prec@1 93.750 (84.926) | ||
744 | +Test: [34/214] Time 0.143 (0.400) Loss 0.3333 (0.2951) Prec@1 87.500 (85.000) | ||
745 | +Test: [35/214] Time 0.113 (0.392) Loss 0.1818 (0.2920) Prec@1 93.750 (85.243) | ||
746 | +Test: [36/214] Time 0.106 (0.384) Loss 0.2087 (0.2897) Prec@1 87.500 (85.304) | ||
747 | +Test: [37/214] Time 0.101 (0.377) Loss 0.2186 (0.2879) Prec@1 87.500 (85.362) | ||
748 | +Test: [38/214] Time 0.096 (0.369) Loss 0.0996 (0.2830) Prec@1 93.750 (85.577) | ||
749 | +Test: [39/214] Time 0.083 (0.362) Loss 0.3121 (0.2838) Prec@1 93.750 (85.781) | ||
750 | +Test: [40/214] Time 0.062 (0.355) Loss 0.2222 (0.2823) Prec@1 87.500 (85.823) | ||
751 | +Test: [41/214] Time 0.080 (0.348) Loss 0.4081 (0.2853) Prec@1 81.250 (85.714) | ||
752 | +Test: [42/214] Time 0.064 (0.342) Loss 0.3267 (0.2862) Prec@1 81.250 (85.610) | ||
753 | +Test: [43/214] Time 0.045 (0.335) Loss 0.0808 (0.2816) Prec@1 100.000 (85.938) | ||
754 | +Test: [44/214] Time 0.066 (0.329) Loss 0.5449 (0.2874) Prec@1 62.500 (85.417) | ||
755 | +Test: [45/214] Time 0.093 (0.324) Loss 0.3741 (0.2893) Prec@1 81.250 (85.326) | ||
756 | +Test: [46/214] Time 0.059 (0.318) Loss 0.1078 (0.2854) Prec@1 93.750 (85.505) | ||
757 | +Test: [47/214] Time 0.071 (0.313) Loss 0.2775 (0.2853) Prec@1 87.500 (85.547) | ||
758 | +Test: [48/214] Time 0.105 (0.309) Loss 0.3871 (0.2873) Prec@1 68.750 (85.204) | ||
759 | +Test: [49/214] Time 0.094 (0.305) Loss 0.5400 (0.2924) Prec@1 75.000 (85.000) | ||
760 | +Test: [50/214] Time 0.074 (0.300) Loss 0.2073 (0.2907) Prec@1 93.750 (85.172) | ||
761 | +Test: [51/214] Time 0.120 (0.297) Loss 0.3317 (0.2915) Prec@1 81.250 (85.096) | ||
762 | +Test: [52/214] Time 0.284 (0.296) Loss 0.4489 (0.2945) Prec@1 68.750 (84.788) | ||
763 | +Test: [53/214] Time 0.129 (0.293) Loss 0.3582 (0.2957) Prec@1 81.250 (84.722) | ||
764 | +Test: [54/214] Time 0.097 (0.290) Loss 0.2803 (0.2954) Prec@1 93.750 (84.886) | ||
765 | +Test: [55/214] Time 0.132 (0.287) Loss 0.2129 (0.2939) Prec@1 93.750 (85.045) | ||
766 | +Test: [56/214] Time 0.123 (0.284) Loss 0.1677 (0.2917) Prec@1 87.500 (85.088) | ||
767 | +Test: [57/214] Time 0.125 (0.281) Loss 0.3602 (0.2929) Prec@1 75.000 (84.914) | ||
768 | +Test: [58/214] Time 0.135 (0.279) Loss 0.2394 (0.2920) Prec@1 87.500 (84.958) | ||
769 | +Test: [59/214] Time 0.546 (0.283) Loss 0.3783 (0.2934) Prec@1 81.250 (84.896) | ||
770 | +Test: [60/214] Time 0.082 (0.280) Loss 0.3108 (0.2937) Prec@1 81.250 (84.836) | ||
771 | +Test: [61/214] Time 0.059 (0.276) Loss 0.1151 (0.2908) Prec@1 93.750 (84.980) | ||
772 | +Test: [62/214] Time 0.093 (0.274) Loss 0.1965 (0.2893) Prec@1 87.500 (85.020) | ||
773 | +Test: [63/214] Time 0.075 (0.270) Loss 0.3316 (0.2900) Prec@1 75.000 (84.863) | ||
774 | +Test: [64/214] Time 0.105 (0.268) Loss 0.3229 (0.2905) Prec@1 81.250 (84.808) | ||
775 | +Test: [65/214] Time 0.108 (0.265) Loss 0.1841 (0.2889) Prec@1 87.500 (84.848) | ||
776 | +Test: [66/214] Time 0.096 (0.263) Loss 0.2653 (0.2885) Prec@1 87.500 (84.888) | ||
777 | +Test: [67/214] Time 0.069 (0.260) Loss 0.2731 (0.2883) Prec@1 81.250 (84.835) | ||
778 | +Test: [68/214] Time 0.057 (0.257) Loss 0.1263 (0.2859) Prec@1 93.750 (84.964) | ||
779 | +Test: [69/214] Time 0.056 (0.254) Loss 0.2317 (0.2852) Prec@1 93.750 (85.089) | ||
780 | +Test: [70/214] Time 0.050 (0.251) Loss 0.1820 (0.2837) Prec@1 87.500 (85.123) | ||
781 | +Test: [71/214] Time 0.067 (0.249) Loss 0.4579 (0.2861) Prec@1 75.000 (84.983) | ||
782 | +Test: [72/214] Time 0.110 (0.247) Loss 0.2124 (0.2851) Prec@1 87.500 (85.017) | ||
783 | +Test: [73/214] Time 0.147 (0.246) Loss 0.4542 (0.2874) Prec@1 62.500 (84.713) | ||
784 | +Test: [74/214] Time 0.176 (0.245) Loss 0.2970 (0.2875) Prec@1 75.000 (84.583) | ||
785 | +Test: [75/214] Time 0.101 (0.243) Loss 0.0447 (0.2843) Prec@1 100.000 (84.786) | ||
786 | +Test: [76/214] Time 0.077 (0.241) Loss 0.4577 (0.2866) Prec@1 81.250 (84.740) | ||
787 | +Test: [77/214] Time 0.053 (0.238) Loss 0.0922 (0.2841) Prec@1 100.000 (84.936) | ||
788 | +Test: [78/214] Time 0.053 (0.236) Loss 0.2799 (0.2841) Prec@1 87.500 (84.968) | ||
789 | +Test: [79/214] Time 0.062 (0.234) Loss 0.2562 (0.2837) Prec@1 81.250 (84.922) | ||
790 | +Test: [80/214] Time 0.062 (0.232) Loss 0.1314 (0.2818) Prec@1 93.750 (85.031) | ||
791 | +Test: [81/214] Time 0.061 (0.230) Loss 0.1779 (0.2806) Prec@1 93.750 (85.137) | ||
792 | +Test: [82/214] Time 0.193 (0.229) Loss 0.3503 (0.2814) Prec@1 93.750 (85.241) | ||
793 | +Test: [83/214] Time 0.070 (0.227) Loss 0.2390 (0.2809) Prec@1 93.750 (85.342) | ||
794 | +Test: [84/214] Time 0.067 (0.225) Loss 0.2989 (0.2811) Prec@1 87.500 (85.368) | ||
795 | +Test: [85/214] Time 0.077 (0.224) Loss 0.3316 (0.2817) Prec@1 81.250 (85.320) | ||
796 | +Test: [86/214] Time 0.047 (0.222) Loss 0.2570 (0.2814) Prec@1 87.500 (85.345) | ||
797 | +Test: [87/214] Time 0.055 (0.220) Loss 0.3271 (0.2819) Prec@1 81.250 (85.298) | ||
798 | +Test: [88/214] Time 0.047 (0.218) Loss 0.1733 (0.2807) Prec@1 93.750 (85.393) | ||
799 | +Test: [89/214] Time 0.054 (0.216) Loss 0.6486 (0.2848) Prec@1 62.500 (85.139) | ||
800 | +Test: [90/214] Time 0.042 (0.214) Loss 0.1517 (0.2833) Prec@1 93.750 (85.234) | ||
801 | +Test: [91/214] Time 0.045 (0.212) Loss 0.3418 (0.2840) Prec@1 81.250 (85.190) | ||
802 | +Test: [92/214] Time 0.051 (0.210) Loss 0.5220 (0.2865) Prec@1 68.750 (85.013) | ||
803 | +Test: [93/214] Time 0.057 (0.209) Loss 0.4164 (0.2879) Prec@1 75.000 (84.907) | ||
804 | +Test: [94/214] Time 0.068 (0.207) Loss 0.4121 (0.2892) Prec@1 81.250 (84.868) | ||
805 | +Test: [95/214] Time 0.056 (0.206) Loss 0.1503 (0.2878) Prec@1 87.500 (84.896) | ||
806 | +Test: [96/214] Time 0.087 (0.204) Loss 0.2766 (0.2877) Prec@1 81.250 (84.858) | ||
807 | +Test: [97/214] Time 0.115 (0.204) Loss 0.2395 (0.2872) Prec@1 93.750 (84.949) | ||
808 | +Test: [98/214] Time 0.138 (0.203) Loss 0.4426 (0.2887) Prec@1 75.000 (84.848) | ||
809 | +Test: [99/214] Time 0.144 (0.202) Loss 0.4849 (0.2907) Prec@1 75.000 (84.750) | ||
810 | +Test: [100/214] Time 0.131 (0.202) Loss 0.1662 (0.2895) Prec@1 87.500 (84.777) | ||
811 | +Test: [101/214] Time 0.092 (0.201) Loss 0.1042 (0.2876) Prec@1 93.750 (84.865) | ||
812 | +Test: [102/214] Time 0.088 (0.199) Loss 0.4668 (0.2894) Prec@1 81.250 (84.830) | ||
813 | +Test: [103/214] Time 0.082 (0.198) Loss 0.3621 (0.2901) Prec@1 81.250 (84.796) | ||
814 | +Test: [104/214] Time 0.077 (0.197) Loss 0.4676 (0.2918) Prec@1 81.250 (84.762) | ||
815 | +Test: [105/214] Time 0.101 (0.196) Loss 0.1992 (0.2909) Prec@1 87.500 (84.788) | ||
816 | +Test: [106/214] Time 0.150 (0.196) Loss 0.2772 (0.2908) Prec@1 93.750 (84.871) | ||
817 | +Test: [107/214] Time 0.070 (0.195) Loss 0.3883 (0.2917) Prec@1 75.000 (84.780) | ||
818 | +Test: [108/214] Time 0.078 (0.194) Loss 0.2923 (0.2917) Prec@1 81.250 (84.748) | ||
819 | +Test: [109/214] Time 0.094 (0.193) Loss 0.3286 (0.2920) Prec@1 87.500 (84.773) | ||
820 | +Test: [110/214] Time 0.046 (0.191) Loss 0.1382 (0.2906) Prec@1 93.750 (84.854) | ||
821 | +Test: [111/214] Time 0.050 (0.190) Loss 0.2462 (0.2902) Prec@1 87.500 (84.877) | ||
822 | +Test: [112/214] Time 0.085 (0.189) Loss 0.1496 (0.2890) Prec@1 100.000 (85.011) | ||
823 | +Test: [113/214] Time 0.091 (0.188) Loss 0.2322 (0.2885) Prec@1 93.750 (85.088) | ||
824 | +Test: [114/214] Time 0.144 (0.188) Loss 0.3522 (0.2890) Prec@1 81.250 (85.054) | ||
825 | +Test: [115/214] Time 0.106 (0.187) Loss 0.4547 (0.2905) Prec@1 81.250 (85.022) | ||
826 | +Test: [116/214] Time 0.050 (0.186) Loss 0.3842 (0.2913) Prec@1 87.500 (85.043) | ||
827 | +Test: [117/214] Time 0.043 (0.185) Loss 0.2003 (0.2905) Prec@1 93.750 (85.117) | ||
828 | +Test: [118/214] Time 0.049 (0.184) Loss 0.2329 (0.2900) Prec@1 87.500 (85.137) | ||
829 | +Test: [119/214] Time 0.053 (0.183) Loss 0.1716 (0.2890) Prec@1 93.750 (85.208) | ||
830 | +Test: [120/214] Time 0.043 (0.181) Loss 0.3089 (0.2892) Prec@1 87.500 (85.227) | ||
831 | +Test: [121/214] Time 0.048 (0.180) Loss 0.3135 (0.2894) Prec@1 87.500 (85.246) | ||
832 | +Test: [122/214] Time 0.072 (0.179) Loss 0.4701 (0.2909) Prec@1 68.750 (85.112) | ||
833 | +Test: [123/214] Time 0.056 (0.178) Loss 0.4308 (0.2920) Prec@1 75.000 (85.030) | ||
834 | +Test: [124/214] Time 0.086 (0.178) Loss 0.2072 (0.2913) Prec@1 87.500 (85.050) | ||
835 | +Test: [125/214] Time 0.108 (0.177) Loss 0.4704 (0.2927) Prec@1 68.750 (84.921) | ||
836 | +Test: [126/214] Time 0.082 (0.176) Loss 0.0354 (0.2907) Prec@1 100.000 (85.039) | ||
837 | +Test: [127/214] Time 0.080 (0.176) Loss 0.1397 (0.2895) Prec@1 93.750 (85.107) | ||
838 | +Test: [128/214] Time 0.078 (0.175) Loss 0.2489 (0.2892) Prec@1 93.750 (85.174) | ||
839 | +Test: [129/214] Time 0.085 (0.174) Loss 0.2714 (0.2891) Prec@1 81.250 (85.144) | ||
840 | +Test: [130/214] Time 0.189 (0.174) Loss 0.4883 (0.2906) Prec@1 81.250 (85.115) | ||
841 | +Test: [131/214] Time 0.065 (0.174) Loss 0.2942 (0.2906) Prec@1 87.500 (85.133) | ||
842 | +Test: [132/214] Time 0.051 (0.173) Loss 0.1635 (0.2897) Prec@1 100.000 (85.244) | ||
843 | +Test: [133/214] Time 0.058 (0.172) Loss 0.7558 (0.2932) Prec@1 62.500 (85.075) | ||
844 | +Test: [134/214] Time 0.057 (0.171) Loss 0.1536 (0.2921) Prec@1 93.750 (85.139) | ||
845 | +Test: [135/214] Time 0.076 (0.170) Loss 0.4966 (0.2936) Prec@1 75.000 (85.064) | ||
846 | +Test: [136/214] Time 0.036 (0.169) Loss 0.2342 (0.2932) Prec@1 87.500 (85.082) | ||
847 | +Test: [137/214] Time 0.095 (0.169) Loss 0.2743 (0.2930) Prec@1 87.500 (85.100) | ||
848 | +Test: [138/214] Time 0.106 (0.168) Loss 0.3421 (0.2934) Prec@1 87.500 (85.117) | ||
849 | +Test: [139/214] Time 0.175 (0.168) Loss 0.3205 (0.2936) Prec@1 81.250 (85.089) | ||
850 | +Test: [140/214] Time 0.123 (0.168) Loss 0.4365 (0.2946) Prec@1 81.250 (85.062) | ||
851 | +Test: [141/214] Time 0.130 (0.168) Loss 0.4789 (0.2959) Prec@1 75.000 (84.991) | ||
852 | +Test: [142/214] Time 0.156 (0.168) Loss 0.3277 (0.2961) Prec@1 81.250 (84.965) | ||
853 | +Test: [143/214] Time 0.147 (0.167) Loss 0.5026 (0.2976) Prec@1 75.000 (84.896) | ||
854 | +Test: [144/214] Time 0.109 (0.167) Loss 0.2051 (0.2969) Prec@1 81.250 (84.871) | ||
855 | +Test: [145/214] Time 0.137 (0.167) Loss 0.7212 (0.2998) Prec@1 62.500 (84.717) | ||
856 | +Test: [146/214] Time 0.115 (0.166) Loss 0.3626 (0.3003) Prec@1 81.250 (84.694) | ||
857 | +Test: [147/214] Time 0.132 (0.166) Loss 0.4104 (0.3010) Prec@1 81.250 (84.671) | ||
858 | +Test: [148/214] Time 0.111 (0.166) Loss 0.6627 (0.3034) Prec@1 68.750 (84.564) | ||
859 | +Test: [149/214] Time 0.101 (0.165) Loss 0.0992 (0.3021) Prec@1 93.750 (84.625) | ||
860 | +Test: [150/214] Time 0.112 (0.165) Loss 0.3192 (0.3022) Prec@1 81.250 (84.603) | ||
861 | +Test: [151/214] Time 0.083 (0.165) Loss 0.3352 (0.3024) Prec@1 87.500 (84.622) | ||
862 | +Test: [152/214] Time 0.159 (0.165) Loss 0.5007 (0.3037) Prec@1 68.750 (84.518) | ||
863 | +Test: [153/214] Time 0.053 (0.164) Loss 0.2930 (0.3036) Prec@1 87.500 (84.537) | ||
864 | +Test: [154/214] Time 0.054 (0.163) Loss 0.3224 (0.3037) Prec@1 93.750 (84.597) | ||
865 | +Test: [155/214] Time 0.053 (0.162) Loss 0.0933 (0.3024) Prec@1 100.000 (84.696) | ||
866 | +Test: [156/214] Time 0.057 (0.162) Loss 0.1487 (0.3014) Prec@1 93.750 (84.753) | ||
867 | +Test: [157/214] Time 0.054 (0.161) Loss 0.3715 (0.3019) Prec@1 87.500 (84.771) | ||
868 | +Test: [158/214] Time 0.120 (0.161) Loss 0.1293 (0.3008) Prec@1 100.000 (84.866) | ||
869 | +Test: [159/214] Time 0.105 (0.160) Loss 0.3710 (0.3012) Prec@1 87.500 (84.883) | ||
870 | +Test: [160/214] Time 0.088 (0.160) Loss 0.1519 (0.3003) Prec@1 93.750 (84.938) | ||
871 | +Test: [161/214] Time 0.049 (0.159) Loss 0.3831 (0.3008) Prec@1 81.250 (84.915) | ||
872 | +Test: [162/214] Time 0.053 (0.159) Loss 0.3570 (0.3011) Prec@1 81.250 (84.893) | ||
873 | +Test: [163/214] Time 0.050 (0.158) Loss 0.3296 (0.3013) Prec@1 75.000 (84.832) | ||
874 | +Test: [164/214] Time 0.051 (0.157) Loss 0.1989 (0.3007) Prec@1 93.750 (84.886) | ||
875 | +Test: [165/214] Time 0.053 (0.157) Loss 0.0796 (0.2994) Prec@1 100.000 (84.977) | ||
876 | +Test: [166/214] Time 0.058 (0.156) Loss 0.2722 (0.2992) Prec@1 81.250 (84.955) | ||
877 | +Test: [167/214] Time 0.109 (0.156) Loss 0.4920 (0.3004) Prec@1 68.750 (84.859) | ||
878 | +Test: [168/214] Time 0.084 (0.155) Loss 0.0693 (0.2990) Prec@1 100.000 (84.948) | ||
879 | +Test: [169/214] Time 0.108 (0.155) Loss 0.4123 (0.2997) Prec@1 81.250 (84.926) | ||
880 | +Test: [170/214] Time 0.058 (0.155) Loss 0.2509 (0.2994) Prec@1 81.250 (84.905) | ||
881 | +Test: [171/214] Time 0.046 (0.154) Loss 0.2394 (0.2990) Prec@1 87.500 (84.920) | ||
882 | +Test: [172/214] Time 0.062 (0.153) Loss 0.2245 (0.2986) Prec@1 87.500 (84.935) | ||
883 | +Test: [173/214] Time 0.072 (0.153) Loss 0.2048 (0.2980) Prec@1 87.500 (84.950) | ||
884 | +Test: [174/214] Time 0.104 (0.153) Loss 0.3298 (0.2982) Prec@1 81.250 (84.929) | ||
885 | +Test: [175/214] Time 0.066 (0.152) Loss 0.4080 (0.2989) Prec@1 68.750 (84.837) | ||
886 | +Test: [176/214] Time 0.048 (0.152) Loss 0.1013 (0.2977) Prec@1 100.000 (84.922) | ||
887 | +Test: [177/214] Time 0.062 (0.151) Loss 0.2795 (0.2976) Prec@1 93.750 (84.972) | ||
888 | +Test: [178/214] Time 0.086 (0.151) Loss 0.2261 (0.2972) Prec@1 87.500 (84.986) | ||
889 | +Test: [179/214] Time 0.117 (0.151) Loss 0.3375 (0.2975) Prec@1 81.250 (84.965) | ||
890 | +Test: [180/214] Time 0.122 (0.150) Loss 0.6710 (0.2995) Prec@1 68.750 (84.876) | ||
891 | +Test: [181/214] Time 0.057 (0.150) Loss 0.1985 (0.2990) Prec@1 87.500 (84.890) | ||
892 | +Test: [182/214] Time 0.106 (0.150) Loss 0.1054 (0.2979) Prec@1 93.750 (84.939) | ||
893 | +Test: [183/214] Time 0.129 (0.150) Loss 0.2059 (0.2974) Prec@1 87.500 (84.952) | ||
894 | +Test: [184/214] Time 0.118 (0.149) Loss 0.2403 (0.2971) Prec@1 81.250 (84.932) | ||
895 | +Test: [185/214] Time 0.110 (0.149) Loss 0.2152 (0.2967) Prec@1 87.500 (84.946) | ||
896 | +Test: [186/214] Time 0.143 (0.149) Loss 0.1807 (0.2960) Prec@1 87.500 (84.960) | ||
897 | +Test: [187/214] Time 0.111 (0.149) Loss 0.3571 (0.2964) Prec@1 75.000 (84.907) | ||
898 | +Test: [188/214] Time 0.120 (0.149) Loss 0.2785 (0.2963) Prec@1 81.250 (84.888) | ||
899 | +Test: [189/214] Time 0.093 (0.148) Loss 0.4799 (0.2972) Prec@1 75.000 (84.836) | ||
900 | +Test: [190/214] Time 0.134 (0.148) Loss 0.2059 (0.2968) Prec@1 93.750 (84.882) | ||
901 | +Test: [191/214] Time 0.060 (0.148) Loss 0.1261 (0.2959) Prec@1 93.750 (84.928) | ||
902 | +Test: [192/214] Time 0.121 (0.148) Loss 0.1231 (0.2950) Prec@1 93.750 (84.974) | ||
903 | +Test: [193/214] Time 0.053 (0.147) Loss 0.2488 (0.2947) Prec@1 87.500 (84.987) | ||
904 | +Test: [194/214] Time 0.052 (0.147) Loss 0.3176 (0.2949) Prec@1 87.500 (85.000) | ||
905 | +Test: [195/214] Time 0.054 (0.146) Loss 0.2220 (0.2945) Prec@1 87.500 (85.013) | ||
906 | +Test: [196/214] Time 0.105 (0.146) Loss 0.0658 (0.2933) Prec@1 100.000 (85.089) | ||
907 | +Test: [197/214] Time 0.124 (0.146) Loss 0.3964 (0.2938) Prec@1 81.250 (85.069) | ||
908 | +Test: [198/214] Time 0.103 (0.146) Loss 0.3075 (0.2939) Prec@1 81.250 (85.050) | ||
909 | +Test: [199/214] Time 0.068 (0.145) Loss 0.4991 (0.2949) Prec@1 81.250 (85.031) | ||
910 | +Test: [200/214] Time 0.056 (0.145) Loss 0.2989 (0.2950) Prec@1 75.000 (84.981) | ||
911 | +Test: [201/214] Time 0.047 (0.144) Loss 0.1454 (0.2942) Prec@1 87.500 (84.994) | ||
912 | +Test: [202/214] Time 0.069 (0.144) Loss 0.1171 (0.2933) Prec@1 93.750 (85.037) | ||
913 | +Test: [203/214] Time 0.057 (0.144) Loss 0.1601 (0.2927) Prec@1 93.750 (85.080) | ||
914 | +Test: [204/214] Time 0.047 (0.143) Loss 0.2712 (0.2926) Prec@1 81.250 (85.061) | ||
915 | +Test: [205/214] Time 0.052 (0.143) Loss 0.3653 (0.2929) Prec@1 81.250 (85.042) | ||
916 | +Test: [206/214] Time 0.074 (0.142) Loss 0.3932 (0.2934) Prec@1 75.000 (84.994) | ||
917 | +Test: [207/214] Time 0.071 (0.142) Loss 0.3791 (0.2938) Prec@1 81.250 (84.976) | ||
918 | +Test: [208/214] Time 0.094 (0.142) Loss 0.2602 (0.2937) Prec@1 81.250 (84.958) | ||
919 | +Test: [209/214] Time 0.095 (0.142) Loss 0.6295 (0.2953) Prec@1 62.500 (84.851) | ||
920 | +Test: [210/214] Time 0.068 (0.141) Loss 0.2665 (0.2951) Prec@1 87.500 (84.864) | ||
921 | +Test: [211/214] Time 0.058 (0.141) Loss 0.3689 (0.2955) Prec@1 81.250 (84.847) | ||
922 | +Test: [212/214] Time 0.041 (0.140) Loss 0.2952 (0.2955) Prec@1 87.500 (84.859) | ||
923 | +Test: [213/214] Time 0.558 (0.142) Loss 0.1382 (0.2949) Prec@1 92.308 (84.887) | ||
924 | + * Prec@1 84.887 | ||
925 | +Creating CAM | ||
926 | +Fatal error in main loop | ||
927 | +Traceback (most recent call last): | ||
928 | + File "test.py", line 173, in main | ||
929 | + run_model(args, q) | ||
930 | + File "test.py", line 255, in run_model | ||
931 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
932 | + File "test.py", line 313, in validate | ||
933 | + f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class']) | ||
934 | + File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 183, in make_grad_cam | ||
935 | + probs, ids = bp.forward(images) # sorted | ||
936 | + File "/home/yh9468/detection/trainer/visualize/grad_cam_utils.py", line 57, in forward | ||
937 | + return super(BackPropagation, self).forward(self.image) | ||
938 | + File "/home/yh9468/detection/trainer/visualize/grad_cam_utils.py", line 31, in forward | ||
939 | + self.logits = self.model(image) | ||
940 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__ | ||
941 | + result = self.forward(*input, **kwargs) | ||
942 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/parallel/data_parallel.py", line 152, in forward | ||
943 | + outputs = self.parallel_apply(replicas, inputs, kwargs) | ||
944 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/parallel/data_parallel.py", line 162, in parallel_apply | ||
945 | + return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) | ||
946 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/parallel/parallel_apply.py", line 83, in parallel_apply | ||
947 | + raise output | ||
948 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/parallel/parallel_apply.py", line 59, in _worker | ||
949 | + output = module(*input, **kwargs) | ||
950 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__ | ||
951 | + result = self.forward(*input, **kwargs) | ||
952 | + File "/home/yh9468/detection/trainer/model.py", line 385, in forward | ||
953 | + x = self.extract_features(inputs) | ||
954 | + File "/home/yh9468/detection/trainer/model.py", line 374, in extract_features | ||
955 | + x = block(x, drop_connect_rate=drop_connect_rate) | ||
956 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__ | ||
957 | + result = self.forward(*input, **kwargs) | ||
958 | + File "/home/yh9468/detection/trainer/model.py", line 272, in forward | ||
959 | + x = self._swish(self._bn0(self._expand_conv(inputs))) | ||
960 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__ | ||
961 | + result = self.forward(*input, **kwargs) | ||
962 | + File "/home/yh9468/detection/trainer/utils.py", line 139, in forward | ||
963 | + return SwishImplementation.apply(x) | ||
964 | + File "/home/yh9468/detection/trainer/utils.py", line 126, in forward | ||
965 | + result = i * torch.sigmoid(i) | ||
966 | +RuntimeError: CUDA out of memory. Tried to allocate 262.00 MiB (GPU 0; 10.92 GiB total capacity; 7.22 GiB already allocated; 51.56 MiB free; 16.05 MiB cached) | ||
967 | +[eval] failed | ||
968 | +Number of model parameters: 154706 | ||
969 | +=> loading checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
970 | +=> loaded checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2348) | ||
971 | +Test: [0/14] Time 1.123 (1.123) Loss 0.1061 (0.1061) Prec@1 94.922 (94.922) | ||
972 | +Test: [1/14] Time 0.054 (0.588) Loss 0.1432 (0.1246) Prec@1 94.141 (94.531) | ||
973 | +Test: [2/14] Time 0.033 (0.403) Loss 0.0830 (0.1107) Prec@1 97.656 (95.573) | ||
974 | +Test: [3/14] Time 0.017 (0.307) Loss 0.0947 (0.1067) Prec@1 96.094 (95.703) | ||
975 | +Test: [4/14] Time 0.027 (0.251) Loss 0.1500 (0.1154) Prec@1 92.969 (95.156) | ||
976 | +Test: [5/14] Time 0.018 (0.212) Loss 0.1165 (0.1156) Prec@1 95.703 (95.247) | ||
977 | +Test: [6/14] Time 0.029 (0.186) Loss 0.1491 (0.1204) Prec@1 94.531 (95.145) | ||
978 | +Test: [7/14] Time 0.056 (0.170) Loss 0.1830 (0.1282) Prec@1 91.797 (94.727) | ||
979 | +Test: [8/14] Time 0.041 (0.155) Loss 0.1621 (0.1320) Prec@1 94.531 (94.705) | ||
980 | +Test: [9/14] Time 0.025 (0.142) Loss 0.0904 (0.1278) Prec@1 96.484 (94.883) | ||
981 | +Test: [10/14] Time 0.030 (0.132) Loss 0.1743 (0.1320) Prec@1 91.016 (94.531) | ||
982 | +Test: [11/14] Time 0.016 (0.122) Loss 0.1148 (0.1306) Prec@1 95.312 (94.596) | ||
983 | +Test: [12/14] Time 0.017 (0.114) Loss 0.1283 (0.1304) Prec@1 94.922 (94.621) | ||
984 | +Test: [13/14] Time 0.096 (0.113) Loss 0.1226 (0.1302) Prec@1 95.699 (94.651) | ||
985 | + * Prec@1 94.651 | ||
986 | +Creating CAM | ||
987 | +Best accuracy: 95.3216364899574 | ||
988 | +[eval] done | ||
989 | +Number of model parameters: 154706 | ||
990 | +=> loading checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
991 | +=> loaded checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2348) | ||
992 | +Test: [0/14] Time 0.926 (0.926) Loss 0.1238 (0.1238) Prec@1 94.922 (94.922) | ||
993 | +Test: [1/14] Time 0.039 (0.483) Loss 0.1387 (0.1312) Prec@1 94.141 (94.531) | ||
994 | +Test: [2/14] Time 0.020 (0.329) Loss 0.1748 (0.1458) Prec@1 92.578 (93.880) | ||
995 | +Test: [3/14] Time 0.012 (0.249) Loss 0.0883 (0.1314) Prec@1 97.266 (94.727) | ||
996 | +Test: [4/14] Time 0.015 (0.203) Loss 0.1191 (0.1289) Prec@1 95.703 (94.922) | ||
997 | +Test: [5/14] Time 0.014 (0.171) Loss 0.1319 (0.1294) Prec@1 94.922 (94.922) | ||
998 | +Test: [6/14] Time 0.036 (0.152) Loss 0.1553 (0.1331) Prec@1 92.578 (94.587) | ||
999 | +Test: [7/14] Time 0.013 (0.135) Loss 0.1337 (0.1332) Prec@1 95.312 (94.678) | ||
1000 | +Test: [8/14] Time 0.026 (0.123) Loss 0.1282 (0.1327) Prec@1 94.922 (94.705) | ||
1001 | +Test: [9/14] Time 0.031 (0.113) Loss 0.1576 (0.1352) Prec@1 93.750 (94.609) | ||
1002 | +Test: [10/14] Time 0.029 (0.106) Loss 0.1193 (0.1337) Prec@1 95.312 (94.673) | ||
1003 | +Test: [11/14] Time 0.025 (0.099) Loss 0.1008 (0.1310) Prec@1 95.703 (94.759) | ||
1004 | +Test: [12/14] Time 0.023 (0.093) Loss 0.1282 (0.1307) Prec@1 93.750 (94.681) | ||
1005 | +Test: [13/14] Time 0.084 (0.093) Loss 0.1103 (0.1302) Prec@1 93.548 (94.651) | ||
1006 | + * Prec@1 94.651 | ||
1007 | +Creating CAM | ||
1008 | +Best accuracy: 95.3216364899574 | ||
1009 | +[eval] done | ||
1010 | +Number of model parameters: 154706 | ||
1011 | +=> loading checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
1012 | +=> loaded checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1389) | ||
1013 | +Test: [0/2] Time 1.166 (1.166) Loss 0.1193 (0.1193) Prec@1 96.094 (96.094) | ||
1014 | +Test: [1/2] Time 0.040 (0.603) Loss 0.1689 (0.1317) Prec@1 95.349 (95.906) | ||
1015 | + * Prec@1 95.906 | ||
1016 | +Creating CAM | ||
1017 | +Best accuracy: 95.90643257007264 | ||
1018 | +[eval] done | ||
1019 | +Number of model parameters: 154706 | ||
1020 | +=> loading checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
1021 | +=> loaded checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1389) | ||
1022 | +Test: [0/2] Time 1.166 (1.166) Loss 0.1175 (0.1175) Prec@1 96.094 (96.094) | ||
1023 | +Test: [1/2] Time 0.049 (0.607) Loss 0.1742 (0.1317) Prec@1 95.349 (95.906) | ||
1024 | + * Prec@1 95.906 | ||
1025 | +Creating CAM | ||
1026 | +Best accuracy: 95.90643257007264 | ||
1027 | +[eval] done | ||
1028 | +Number of model parameters: 154706 | ||
1029 | +=> loading checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
1030 | +=> loaded checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1389) | ||
1031 | +Test: [0/14] Time 1.217 (1.217) Loss 0.2104 (0.2104) Prec@1 89.844 (89.844) | ||
1032 | +Test: [1/14] Time 0.019 (0.618) Loss 0.1402 (0.1753) Prec@1 94.531 (92.188) | ||
1033 | +Test: [2/14] Time 0.012 (0.416) Loss 0.1672 (0.1726) Prec@1 93.359 (92.578) | ||
1034 | +Test: [3/14] Time 0.011 (0.315) Loss 0.1261 (0.1610) Prec@1 94.531 (93.066) | ||
1035 | +Test: [4/14] Time 0.011 (0.254) Loss 0.1214 (0.1531) Prec@1 95.312 (93.516) | ||
1036 | +Test: [5/14] Time 0.012 (0.214) Loss 0.1885 (0.1590) Prec@1 92.578 (93.359) | ||
1037 | +Test: [6/14] Time 0.012 (0.185) Loss 0.1832 (0.1624) Prec@1 92.188 (93.192) | ||
1038 | +Test: [7/14] Time 0.011 (0.163) Loss 0.1629 (0.1625) Prec@1 93.359 (93.213) | ||
1039 | +Test: [8/14] Time 0.010 (0.146) Loss 0.1113 (0.1568) Prec@1 95.703 (93.490) | ||
1040 | +Test: [9/14] Time 0.012 (0.133) Loss 0.1671 (0.1578) Prec@1 92.188 (93.359) | ||
1041 | +Test: [10/14] Time 0.011 (0.122) Loss 0.1282 (0.1551) Prec@1 94.922 (93.501) | ||
1042 | +Test: [11/14] Time 0.011 (0.112) Loss 0.1508 (0.1548) Prec@1 93.750 (93.522) | ||
1043 | +Test: [12/14] Time 0.013 (0.105) Loss 0.2146 (0.1594) Prec@1 91.406 (93.359) | ||
1044 | +Test: [13/14] Time 0.037 (0.100) Loss 0.1688 (0.1596) Prec@1 93.548 (93.365) | ||
1045 | + * Prec@1 93.365 | ||
1046 | +Creating CAM | ||
1047 | +Best accuracy: 95.90643257007264 | ||
1048 | +[eval] done | ||
1049 | +Number of model parameters: 154706 | ||
1050 | +=> loading checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
1051 | +=> loaded checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
1052 | +Test: [0/14] Time 1.085 (1.085) Loss 0.2096 (0.2096) Prec@1 95.312 (95.312) | ||
1053 | +Test: [1/14] Time 0.039 (0.562) Loss 0.2121 (0.2109) Prec@1 94.531 (94.922) | ||
1054 | +Test: [2/14] Time 0.016 (0.380) Loss 0.2051 (0.2090) Prec@1 96.094 (95.312) | ||
1055 | +Test: [3/14] Time 0.014 (0.289) Loss 0.2188 (0.2114) Prec@1 92.188 (94.531) | ||
1056 | +Test: [4/14] Time 0.012 (0.233) Loss 0.2191 (0.2130) Prec@1 93.750 (94.375) | ||
1057 | +Test: [5/14] Time 0.011 (0.196) Loss 0.2372 (0.2170) Prec@1 93.750 (94.271) | ||
1058 | +Test: [6/14] Time 0.012 (0.170) Loss 0.2187 (0.2172) Prec@1 92.578 (94.029) | ||
1059 | +Test: [7/14] Time 0.010 (0.150) Loss 0.1857 (0.2133) Prec@1 95.312 (94.189) | ||
1060 | +Test: [8/14] Time 0.010 (0.134) Loss 0.2194 (0.2140) Prec@1 95.312 (94.314) | ||
1061 | +Test: [9/14] Time 0.011 (0.122) Loss 0.2345 (0.2160) Prec@1 92.969 (94.180) | ||
1062 | +Test: [10/14] Time 0.012 (0.112) Loss 0.2408 (0.2183) Prec@1 92.188 (93.999) | ||
1063 | +Test: [11/14] Time 0.011 (0.104) Loss 0.2273 (0.2190) Prec@1 93.750 (93.978) | ||
1064 | +Test: [12/14] Time 0.011 (0.097) Loss 0.2092 (0.2183) Prec@1 94.141 (93.990) | ||
1065 | +Test: [13/14] Time 0.044 (0.093) Loss 0.2458 (0.2190) Prec@1 90.323 (93.891) | ||
1066 | + * Prec@1 93.891 | ||
1067 | +Creating CAM | ||
1068 | +Best accuracy: 93.89067538834844 | ||
1069 | +[eval] done | ||
1070 | +Number of model parameters: 154706 | ||
1071 | +=> loading checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
1072 | +=> loaded checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
1073 | +Test: [0/2] Time 1.041 (1.041) Loss 0.2489 (0.2489) Prec@1 91.406 (91.406) | ||
1074 | +Test: [1/2] Time 0.052 (0.546) Loss 0.2858 (0.2582) Prec@1 86.047 (90.058) | ||
1075 | + * Prec@1 90.058 | ||
1076 | +Creating CAM | ||
1077 | +Best accuracy: 93.27485362270423 | ||
1078 | +[eval] done | ||
1079 | +Number of model parameters: 154706 | ||
1080 | +=> loading checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
1081 | +=> loaded checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2117) | ||
1082 | +Test: [0/2] Time 1.039 (1.039) Loss 0.2551 (0.2551) Prec@1 93.359 (93.359) | ||
1083 | +Test: [1/2] Time 0.061 (0.550) Loss 0.2621 (0.2569) Prec@1 93.023 (93.275) | ||
1084 | + * Prec@1 93.275 | ||
1085 | +Creating CAM | ||
1086 | +Best accuracy: 93.27485362270423 | ||
1087 | +[eval] done | ||
1088 | +Number of model parameters: 154706 | ||
1089 | +=> loading checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
1090 | +=> loaded checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2117) | ||
1091 | +Test: [0/14] Time 1.156 (1.156) Loss 0.2277 (0.2277) Prec@1 94.531 (94.531) | ||
1092 | +Test: [1/14] Time 0.020 (0.588) Loss 0.2137 (0.2207) Prec@1 94.531 (94.531) | ||
1093 | +Test: [2/14] Time 0.014 (0.397) Loss 0.2680 (0.2365) Prec@1 91.016 (93.359) | ||
1094 | +Test: [3/14] Time 0.013 (0.301) Loss 0.2404 (0.2375) Prec@1 92.188 (93.066) | ||
1095 | +Test: [4/14] Time 0.013 (0.243) Loss 0.2448 (0.2389) Prec@1 91.406 (92.734) | ||
1096 | +Test: [5/14] Time 0.014 (0.205) Loss 0.2563 (0.2418) Prec@1 89.844 (92.253) | ||
1097 | +Test: [6/14] Time 0.014 (0.178) Loss 0.2798 (0.2473) Prec@1 89.453 (91.853) | ||
1098 | +Test: [7/14] Time 0.012 (0.157) Loss 0.2298 (0.2451) Prec@1 92.969 (91.992) | ||
1099 | +Test: [8/14] Time 0.011 (0.141) Loss 0.2395 (0.2445) Prec@1 94.531 (92.274) | ||
1100 | +Test: [9/14] Time 0.013 (0.128) Loss 0.2545 (0.2455) Prec@1 90.625 (92.109) | ||
1101 | +Test: [10/14] Time 0.021 (0.118) Loss 0.2275 (0.2438) Prec@1 93.359 (92.223) | ||
1102 | +Test: [11/14] Time 0.015 (0.110) Loss 0.2461 (0.2440) Prec@1 91.406 (92.155) | ||
1103 | +Test: [12/14] Time 0.010 (0.102) Loss 0.2224 (0.2424) Prec@1 94.531 (92.338) | ||
1104 | +Test: [13/14] Time 0.036 (0.097) Loss 0.2299 (0.2420) Prec@1 92.473 (92.341) | ||
1105 | + * Prec@1 92.341 | ||
1106 | +Creating CAM | ||
1107 | +Best accuracy: 93.27485362270423 | ||
1108 | +[eval] done | ||
1109 | +Number of model parameters: 154706 | ||
1110 | +=> loading checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
1111 | +=> loaded checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
1112 | +Test: [0/14] Time 1.152 (1.152) Loss 0.2329 (0.2329) Prec@1 93.359 (93.359) | ||
1113 | +Test: [1/14] Time 0.018 (0.585) Loss 0.2266 (0.2298) Prec@1 94.531 (93.945) | ||
1114 | +Test: [2/14] Time 0.013 (0.394) Loss 0.2336 (0.2310) Prec@1 91.797 (93.229) | ||
1115 | +Test: [3/14] Time 0.012 (0.299) Loss 0.2259 (0.2298) Prec@1 92.969 (93.164) | ||
1116 | +Test: [4/14] Time 0.012 (0.241) Loss 0.2114 (0.2261) Prec@1 93.750 (93.281) | ||
1117 | +Test: [5/14] Time 0.012 (0.203) Loss 0.2309 (0.2269) Prec@1 92.188 (93.099) | ||
1118 | +Test: [6/14] Time 0.010 (0.175) Loss 0.1863 (0.2211) Prec@1 96.484 (93.583) | ||
1119 | +Test: [7/14] Time 0.012 (0.155) Loss 0.2356 (0.2229) Prec@1 92.969 (93.506) | ||
1120 | +Test: [8/14] Time 0.012 (0.139) Loss 0.2073 (0.2212) Prec@1 92.969 (93.446) | ||
1121 | +Test: [9/14] Time 0.010 (0.126) Loss 0.2034 (0.2194) Prec@1 96.484 (93.750) | ||
1122 | +Test: [10/14] Time 0.011 (0.116) Loss 0.2154 (0.2190) Prec@1 94.922 (93.857) | ||
1123 | +Test: [11/14] Time 0.012 (0.107) Loss 0.2193 (0.2191) Prec@1 93.359 (93.815) | ||
1124 | +Test: [12/14] Time 0.011 (0.100) Loss 0.2111 (0.2184) Prec@1 94.141 (93.840) | ||
1125 | +Test: [13/14] Time 0.052 (0.096) Loss 0.2402 (0.2190) Prec@1 95.699 (93.891) | ||
1126 | + * Prec@1 93.891 | ||
1127 | +Best accuracy: 93.89067535266581 | ||
1128 | +[eval] done | ||
1129 | +Number of model parameters: 154706 | ||
1130 | +=> loading checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
1131 | +=> loaded checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1389) | ||
1132 | +Test: [0/2] Time 1.056 (1.056) Loss 0.1371 (0.1371) Prec@1 95.703 (95.703) | ||
1133 | +Test: [1/2] Time 0.057 (0.556) Loss 0.1158 (0.1317) Prec@1 96.512 (95.906) | ||
1134 | + * Prec@1 95.906 | ||
1135 | +Best accuracy: 95.90643257007264 | ||
1136 | +[eval] done | ||
1137 | +Number of model parameters: 154706 | ||
1138 | +=> loading checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
1139 | +=> loaded checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1389) | ||
1140 | +Test: [0/2] Time 1.038 (1.038) Loss 0.1294 (0.1294) Prec@1 96.094 (96.094) | ||
1141 | +Test: [1/2] Time 0.039 (0.538) Loss 0.1388 (0.1317) Prec@1 95.349 (95.906) | ||
1142 | + * Prec@1 95.906 | ||
1143 | +Best accuracy: 95.90643257007264 | ||
1144 | +[eval] done | ||
1145 | +Number of model parameters: 154706 | ||
1146 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
1147 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
1148 | +Test: [0/2] Time 0.814 (0.814) Loss 0.0781 (0.0781) Prec@1 98.438 (98.438) | ||
1149 | +Test: [1/2] Time 0.047 (0.430) Loss 0.1064 (0.0846) Prec@1 97.368 (98.193) | ||
1150 | + * Prec@1 98.193 | ||
1151 | +Best accuracy: 98.19277163585984 | ||
1152 | +[eval] done | ||
1153 | +Number of model parameters: 154706 | ||
1154 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
1155 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
1156 | +Test: [0/2] Time 0.796 (0.796) Loss 0.0715 (0.0715) Prec@1 98.047 (98.047) | ||
1157 | +Test: [1/2] Time 0.046 (0.421) Loss 0.1287 (0.0846) Prec@1 98.684 (98.193) | ||
1158 | + * Prec@1 98.193 | ||
1159 | +Best accuracy: 98.1927713600986 | ||
1160 | +[eval] done |
1 | +Number of model parameters: 154706 | ||
2 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
4 | +Fatal error in main loop | ||
5 | +Traceback (most recent call last): | ||
6 | + File "/home/yh9468/detection/trainer/test.py", line 181, in main | ||
7 | + run_model(args, q) | ||
8 | + File "/home/yh9468/detection/trainer/test.py", line 263, in run_model | ||
9 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
10 | + File "/home/yh9468/detection/trainer/test.py", line 296, in validate | ||
11 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=True) | ||
12 | + File "/home/yh9468/detection/trainer/test.py", line 373, in save_error_case | ||
13 | + cv2.imwrite(f"eval_results/{args['task']}/correct_case/idx_{correct_case_idx}_label_{class_arr[target[idx]]}_real.bmp" ,img) | ||
14 | +NameError: name 'correct_case_idx' is not defined | ||
15 | +[validate_2020-03-26-16-50-29] failed |
1 | +Number of model parameters: 154706 | ||
2 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
4 | +Fatal error in main loop | ||
5 | +Traceback (most recent call last): | ||
6 | + File "/home/yh9468/detection/trainer/test.py", line 184, in main | ||
7 | + run_model(args, q) | ||
8 | + File "/home/yh9468/detection/trainer/test.py", line 266, in run_model | ||
9 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
10 | + File "/home/yh9468/detection/trainer/test.py", line 299, in validate | ||
11 | + save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=True) | ||
12 | + File "/home/yh9468/detection/trainer/test.py", line 376, in save_error_case | ||
13 | + cv2.imwrite(f"eval_results/{args['task']}/correct_case/idx_{correct_case_idx}_label_{class_arr[target[idx]]}_real.bmp" ,img) | ||
14 | +NameError: name 'correct_case_idx' is not defined | ||
15 | +[validate_2020-03-26-16-55-37] failed |
1 | +Number of model parameters: 154706 | ||
2 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
4 | + * Prec@1 98.193 | ||
5 | + * Prec@1 98.193 | ||
6 | +Best accuracy: 98.19277163585984 | ||
7 | +[validate_2020-03-26-16-57-52] done | ||
8 | +[validate_2020-03-26-16-57-52] done | ||
9 | +start test using path : ../data/Fourth_data/demo | ||
10 | +Test start | ||
11 | +loading checkpoint... | ||
12 | +checkpoint already loaded! |
1 | +Number of model parameters: 154706 | ||
2 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
4 | + * Prec@1 98.193 | ||
5 | + * Prec@1 98.193 | ||
6 | +Best accuracy: 98.1927713600986 | ||
7 | +[validate_2020-03-26-17-02-22] done | ||
8 | +[validate_2020-03-26-17-02-22] done | ||
9 | +start test using path : ../data/Fourth_data/demo | ||
10 | +Test start | ||
11 | +loading checkpoint... | ||
12 | +checkpoint already loaded! | ||
13 | +start test | ||
14 | +data path directory is ../data/Fourth_data/demo | ||
15 | +finish test | ||
16 | +start test using path : ../data/Fourth_data/demo | ||
17 | +Test start | ||
18 | +loading checkpoint... | ||
19 | +checkpoint already loaded! | ||
20 | +start test | ||
21 | +data path directory is ../data/Fourth_data/demo | ||
22 | +finish test |
1 | +Number of model parameters: 154706 | ||
2 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
4 | + * Prec@1 98.193 | ||
5 | + * Prec@1 98.193 | ||
6 | +Best accuracy: 98.1927713600986 | ||
7 | +[validate_2020-03-26-17-09-14] done | ||
8 | +[validate_2020-03-26-17-09-14] done | ||
9 | +Number of model parameters: 154706 | ||
10 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
11 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
12 | + * Prec@1 98.193 | ||
13 | + * Prec@1 98.193 | ||
14 | +Best accuracy: 98.19277163585984 | ||
15 | +[validate_2020-03-26-17-09-34] done | ||
16 | +[validate_2020-03-26-17-09-34] done | ||
17 | +start test using path : ../data/Fourth_data/demo | ||
18 | +Test start | ||
19 | +loading checkpoint... | ||
20 | +checkpoint already loaded! | ||
21 | +start test | ||
22 | +data path directory is ../data/Fourth_data/demo | ||
23 | +finish test | ||
24 | +set Type | ||
25 | +start test using path : ../data/Fourth_data/demo | ||
26 | +Test start | ||
27 | +loading checkpoint... | ||
28 | +checkpoint already loaded! | ||
29 | +start test | ||
30 | +data path directory is ../data/Fourth_data/demo | ||
31 | +finish test | ||
32 | +train start | ||
33 | +load yml file | ||
34 | +2020-03-26-17-10-28 | ||
35 | +use seed 825 | ||
36 | +use dataset : ../data/Fourth_data/All | ||
37 | +{'task': 'All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': '../data/Fourth_data/All', 'val': '../data/Fourth_data/All', 'test': '../data/Fourth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'weight': [2.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0], 'resume': '', 'augment': True, 'size': 224, 'confidence': False}, 'predict': {'batch-size': 256, 'worker': 64, 'cam-class': 'Crack', 'cam': False, 'normalize': True}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-26-17-10-28'} | ||
38 | +using normalize | ||
39 | +using no dropout | ||
40 | +using SGD | ||
41 | +Number of model parameters: 461559 | ||
42 | +Epoch: [0][0/12] Time 3.022 (3.022) Loss 1.9330 (1.9330) Prec@1 16.016 (16.016) | ||
43 | +Epoch: [0][10/12] Time 0.159 (0.411) Loss 1.6084 (1.7482) Prec@1 23.828 (17.898) | ||
44 | +Test: [0/2] Time 1.651 (1.651) Loss 1.8596 (1.8596) Prec@1 10.938 (10.938) | ||
45 | + * epoch: 0 Prec@1 12.012 | ||
46 | + * epoch: 0 Prec@1 12.012 | ||
47 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
48 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
49 | +Epoch: [1][0/12] Time 1.962 (1.962) Loss 1.5773 (1.5773) Prec@1 28.516 (28.516) | ||
50 | +Epoch: [1][10/12] Time 0.158 (0.323) Loss 1.6759 (1.5514) Prec@1 20.312 (26.136) | ||
51 | +Test: [0/2] Time 1.653 (1.653) Loss 1.8462 (1.8462) Prec@1 31.250 (31.250) | ||
52 | + * epoch: 1 Prec@1 30.030 | ||
53 | + * epoch: 1 Prec@1 30.030 | ||
54 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
55 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
56 | +Epoch: [2][0/12] Time 1.952 (1.952) Loss 1.5458 (1.5458) Prec@1 26.562 (26.562) | ||
57 | +Epoch: [2][10/12] Time 0.157 (0.331) Loss 1.2252 (1.3803) Prec@1 42.969 (31.889) | ||
58 | +Test: [0/2] Time 1.674 (1.674) Loss 1.7407 (1.7407) Prec@1 30.859 (30.859) | ||
59 | + * epoch: 2 Prec@1 32.733 | ||
60 | + * epoch: 2 Prec@1 32.733 | ||
61 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
62 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
63 | +Epoch: [3][0/12] Time 1.960 (1.960) Loss 1.2237 (1.2237) Prec@1 39.062 (39.062) | ||
64 | +Epoch: [3][10/12] Time 0.155 (0.323) Loss 1.1566 (1.2237) Prec@1 41.797 (41.193) | ||
65 | +Test: [0/2] Time 1.687 (1.687) Loss 1.7368 (1.7368) Prec@1 22.266 (22.266) | ||
66 | + * epoch: 3 Prec@1 22.823 | ||
67 | + * epoch: 3 Prec@1 22.823 | ||
68 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
69 | +Epoch: [4][0/12] Time 1.967 (1.967) Loss 1.3065 (1.3065) Prec@1 48.828 (48.828) | ||
70 | +Epoch: [4][10/12] Time 0.162 (0.335) Loss 1.0501 (1.1183) Prec@1 58.594 (53.906) | ||
71 | +Test: [0/2] Time 1.665 (1.665) Loss 1.1960 (1.1960) Prec@1 53.516 (53.516) | ||
72 | + * epoch: 4 Prec@1 52.553 | ||
73 | + * epoch: 4 Prec@1 52.553 | ||
74 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
75 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
76 | +Epoch: [5][0/12] Time 1.930 (1.930) Loss 1.0283 (1.0283) Prec@1 61.719 (61.719) | ||
77 | +Epoch: [5][10/12] Time 0.159 (0.320) Loss 1.0155 (1.0390) Prec@1 58.594 (60.227) | ||
78 | +Test: [0/2] Time 1.692 (1.692) Loss 1.2309 (1.2309) Prec@1 60.938 (60.938) | ||
79 | + * epoch: 5 Prec@1 60.360 | ||
80 | + * epoch: 5 Prec@1 60.360 | ||
81 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
82 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
83 | +Epoch: [6][0/12] Time 2.214 (2.214) Loss 0.9515 (0.9515) Prec@1 64.844 (64.844) | ||
84 | +Epoch: [6][10/12] Time 0.158 (0.346) Loss 0.7833 (0.9050) Prec@1 75.781 (68.999) | ||
85 | +Test: [0/2] Time 1.663 (1.663) Loss 0.9625 (0.9625) Prec@1 49.609 (49.609) | ||
86 | + * epoch: 6 Prec@1 49.850 | ||
87 | + * epoch: 6 Prec@1 49.850 | ||
88 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
89 | +Epoch: [7][0/12] Time 1.919 (1.919) Loss 0.7668 (0.7668) Prec@1 76.172 (76.172) | ||
90 | +Epoch: [7][10/12] Time 0.158 (0.319) Loss 0.9260 (0.8527) Prec@1 63.672 (71.768) | ||
91 | +Test: [0/2] Time 1.675 (1.675) Loss 1.1891 (1.1891) Prec@1 61.328 (61.328) | ||
92 | + * epoch: 7 Prec@1 58.859 | ||
93 | + * epoch: 7 Prec@1 58.859 | ||
94 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
95 | +Epoch: [8][0/12] Time 2.244 (2.244) Loss 0.8546 (0.8546) Prec@1 72.266 (72.266) | ||
96 | +Epoch: [8][10/12] Time 0.159 (0.348) Loss 0.7712 (0.8669) Prec@1 76.562 (71.094) | ||
97 | +Test: [0/2] Time 1.707 (1.707) Loss 0.9092 (0.9092) Prec@1 63.281 (63.281) | ||
98 | + * epoch: 8 Prec@1 62.462 | ||
99 | + * epoch: 8 Prec@1 62.462 | ||
100 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
101 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
102 | +Epoch: [9][0/12] Time 1.925 (1.925) Loss 0.7878 (0.7878) Prec@1 78.516 (78.516) | ||
103 | +Epoch: [9][10/12] Time 0.160 (0.325) Loss 0.7174 (0.7807) Prec@1 69.922 (73.509) | ||
104 | +Test: [0/2] Time 1.707 (1.707) Loss 0.9544 (0.9544) Prec@1 62.891 (62.891) | ||
105 | + * epoch: 9 Prec@1 60.661 | ||
106 | + * epoch: 9 Prec@1 60.661 | ||
107 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
108 | +Epoch: [10][0/12] Time 2.220 (2.220) Loss 0.9374 (0.9374) Prec@1 67.188 (67.188) | ||
109 | +Epoch: [10][10/12] Time 0.159 (0.347) Loss 0.7273 (0.7483) Prec@1 80.078 (75.497) | ||
110 | +Test: [0/2] Time 1.696 (1.696) Loss 0.9895 (0.9895) Prec@1 47.656 (47.656) | ||
111 | + * epoch: 10 Prec@1 48.649 | ||
112 | + * epoch: 10 Prec@1 48.649 | ||
113 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
114 | +Epoch: [11][0/12] Time 1.974 (1.974) Loss 0.6920 (0.6920) Prec@1 75.391 (75.391) | ||
115 | +Epoch: [11][10/12] Time 0.154 (0.324) Loss 0.7892 (0.6962) Prec@1 71.484 (75.781) | ||
116 | +Test: [0/2] Time 1.665 (1.665) Loss 1.1736 (1.1736) Prec@1 78.125 (78.125) | ||
117 | + * epoch: 11 Prec@1 77.778 | ||
118 | + * epoch: 11 Prec@1 77.778 | ||
119 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
120 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
121 | +Epoch: [12][0/12] Time 1.927 (1.927) Loss 0.7375 (0.7375) Prec@1 78.906 (78.906) | ||
122 | +Epoch: [12][10/12] Time 0.159 (0.328) Loss 0.6453 (0.6972) Prec@1 79.688 (77.308) | ||
123 | +Test: [0/2] Time 1.703 (1.703) Loss 0.7701 (0.7701) Prec@1 79.688 (79.688) | ||
124 | + * epoch: 12 Prec@1 79.279 | ||
125 | + * epoch: 12 Prec@1 79.279 | ||
126 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
127 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
128 | +Epoch: [13][0/12] Time 1.983 (1.983) Loss 0.6737 (0.6737) Prec@1 79.688 (79.688) | ||
129 | +Epoch: [13][10/12] Time 0.158 (0.325) Loss 0.6125 (0.6647) Prec@1 79.688 (78.942) | ||
130 | +Test: [0/2] Time 1.673 (1.673) Loss 1.7411 (1.7411) Prec@1 44.922 (44.922) | ||
131 | + * epoch: 13 Prec@1 44.745 | ||
132 | + * epoch: 13 Prec@1 44.745 | ||
133 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
134 | +Epoch: [14][0/12] Time 1.969 (1.969) Loss 0.7033 (0.7033) Prec@1 76.953 (76.953) | ||
135 | +Epoch: [14][10/12] Time 0.160 (0.328) Loss 0.7069 (0.6706) Prec@1 78.516 (78.196) | ||
136 | +Test: [0/2] Time 1.689 (1.689) Loss 0.7330 (0.7330) Prec@1 80.078 (80.078) | ||
137 | + * epoch: 14 Prec@1 80.781 | ||
138 | + * epoch: 14 Prec@1 80.781 | ||
139 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
140 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
141 | +Epoch: [15][0/12] Time 1.957 (1.957) Loss 0.6477 (0.6477) Prec@1 76.562 (76.562) | ||
142 | +Epoch: [15][10/12] Time 0.160 (0.323) Loss 0.6251 (0.6128) Prec@1 81.641 (80.007) | ||
143 | +Test: [0/2] Time 1.675 (1.675) Loss 0.7821 (0.7821) Prec@1 76.562 (76.562) | ||
144 | + * epoch: 15 Prec@1 76.877 | ||
145 | + * epoch: 15 Prec@1 76.877 | ||
146 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
147 | +Epoch: [16][0/12] Time 2.133 (2.133) Loss 0.6314 (0.6314) Prec@1 77.734 (77.734) | ||
148 | +Epoch: [16][10/12] Time 0.159 (0.338) Loss 0.6333 (0.6552) Prec@1 80.469 (79.190) | ||
149 | +Test: [0/2] Time 1.686 (1.686) Loss 0.8334 (0.8334) Prec@1 81.641 (81.641) | ||
150 | + * epoch: 16 Prec@1 81.982 | ||
151 | + * epoch: 16 Prec@1 81.982 | ||
152 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
153 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
154 | +Epoch: [17][0/12] Time 1.903 (1.903) Loss 0.5610 (0.5610) Prec@1 79.297 (79.297) | ||
155 | +Epoch: [17][10/12] Time 0.158 (0.324) Loss 0.5548 (0.5905) Prec@1 77.734 (81.001) | ||
156 | +Test: [0/2] Time 1.713 (1.713) Loss 0.7781 (0.7781) Prec@1 82.422 (82.422) | ||
157 | + * epoch: 17 Prec@1 81.381 | ||
158 | + * epoch: 17 Prec@1 81.381 | ||
159 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
160 | +Epoch: [18][0/12] Time 1.914 (1.914) Loss 0.5484 (0.5484) Prec@1 77.344 (77.344) | ||
161 | +Epoch: [18][10/12] Time 0.159 (0.320) Loss 0.6511 (0.6283) Prec@1 79.688 (79.545) | ||
162 | +Test: [0/2] Time 1.666 (1.666) Loss 0.8708 (0.8708) Prec@1 62.500 (62.500) | ||
163 | + * epoch: 18 Prec@1 63.664 | ||
164 | + * epoch: 18 Prec@1 63.664 | ||
165 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
166 | +Epoch: [19][0/12] Time 1.918 (1.918) Loss 0.6416 (0.6416) Prec@1 79.297 (79.297) | ||
167 | +Epoch: [19][10/12] Time 0.158 (0.320) Loss 0.5400 (0.5515) Prec@1 83.203 (83.097) | ||
168 | +Test: [0/2] Time 1.694 (1.694) Loss 0.6527 (0.6527) Prec@1 81.250 (81.250) | ||
169 | + * epoch: 19 Prec@1 81.081 | ||
170 | + * epoch: 19 Prec@1 81.081 | ||
171 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
172 | +Epoch: [20][0/12] Time 2.206 (2.206) Loss 0.5358 (0.5358) Prec@1 81.641 (81.641) | ||
173 | +Epoch: [20][10/12] Time 0.160 (0.345) Loss 0.6041 (0.5729) Prec@1 77.734 (81.428) | ||
174 | +Test: [0/2] Time 1.720 (1.720) Loss 0.5669 (0.5669) Prec@1 83.594 (83.594) | ||
175 | + * epoch: 20 Prec@1 81.682 | ||
176 | + * epoch: 20 Prec@1 81.682 | ||
177 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
178 | +Epoch: [21][0/12] Time 2.203 (2.203) Loss 0.4921 (0.4921) Prec@1 82.422 (82.422) | ||
179 | +Epoch: [21][10/12] Time 0.158 (0.345) Loss 0.4483 (0.5416) Prec@1 84.375 (82.919) | ||
180 | +Test: [0/2] Time 1.706 (1.706) Loss 0.7614 (0.7614) Prec@1 78.516 (78.516) | ||
181 | + * epoch: 21 Prec@1 78.679 | ||
182 | + * epoch: 21 Prec@1 78.679 | ||
183 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
184 | +Epoch: [22][0/12] Time 1.921 (1.921) Loss 0.6461 (0.6461) Prec@1 81.250 (81.250) | ||
185 | +Epoch: [22][10/12] Time 0.158 (0.319) Loss 0.5921 (0.5490) Prec@1 82.031 (82.919) | ||
186 | +Test: [0/2] Time 1.709 (1.709) Loss 0.6422 (0.6422) Prec@1 82.812 (82.812) | ||
187 | + * epoch: 22 Prec@1 83.784 | ||
188 | + * epoch: 22 Prec@1 83.784 | ||
189 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
190 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
191 | +Epoch: [23][0/12] Time 1.939 (1.939) Loss 0.4573 (0.4573) Prec@1 87.500 (87.500) | ||
192 | +Epoch: [23][10/12] Time 0.159 (0.331) Loss 0.5843 (0.5141) Prec@1 80.469 (84.624) | ||
193 | +Test: [0/2] Time 1.685 (1.685) Loss 0.9102 (0.9102) Prec@1 75.000 (75.000) | ||
194 | + * epoch: 23 Prec@1 76.577 | ||
195 | + * epoch: 23 Prec@1 76.577 | ||
196 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
197 | +Epoch: [24][0/12] Time 1.925 (1.925) Loss 0.7865 (0.7865) Prec@1 78.125 (78.125) | ||
198 | +Epoch: [24][10/12] Time 0.160 (0.319) Loss 0.5571 (0.6079) Prec@1 85.938 (80.859) | ||
199 | +Test: [0/2] Time 1.712 (1.712) Loss 0.5972 (0.5972) Prec@1 82.422 (82.422) | ||
200 | + * epoch: 24 Prec@1 82.583 | ||
201 | + * epoch: 24 Prec@1 82.583 | ||
202 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
203 | +Epoch: [25][0/12] Time 1.958 (1.958) Loss 0.5632 (0.5632) Prec@1 83.984 (83.984) | ||
204 | +Epoch: [25][10/12] Time 0.159 (0.322) Loss 0.3754 (0.5158) Prec@1 88.281 (83.700) | ||
205 | +Test: [0/2] Time 1.701 (1.701) Loss 0.8734 (0.8734) Prec@1 77.734 (77.734) | ||
206 | + * epoch: 25 Prec@1 78.378 | ||
207 | + * epoch: 25 Prec@1 78.378 | ||
208 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
209 | +Epoch: [26][0/12] Time 2.162 (2.162) Loss 0.6764 (0.6764) Prec@1 81.641 (81.641) | ||
210 | +Epoch: [26][10/12] Time 0.160 (0.342) Loss 0.5274 (0.5528) Prec@1 80.469 (82.209) | ||
211 | +Test: [0/2] Time 1.696 (1.696) Loss 0.6240 (0.6240) Prec@1 85.938 (85.938) | ||
212 | + * epoch: 26 Prec@1 84.084 | ||
213 | + * epoch: 26 Prec@1 84.084 | ||
214 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
215 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
216 | +Epoch: [27][0/12] Time 2.181 (2.181) Loss 0.5241 (0.5241) Prec@1 83.203 (83.203) | ||
217 | +Epoch: [27][10/12] Time 0.158 (0.343) Loss 0.6147 (0.5227) Prec@1 79.297 (83.736) | ||
218 | +Test: [0/2] Time 1.708 (1.708) Loss 0.7268 (0.7268) Prec@1 80.469 (80.469) | ||
219 | + * epoch: 27 Prec@1 79.279 | ||
220 | + * epoch: 27 Prec@1 79.279 | ||
221 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
222 | +Epoch: [28][0/12] Time 1.966 (1.966) Loss 0.4439 (0.4439) Prec@1 87.500 (87.500) | ||
223 | +Epoch: [28][10/12] Time 0.159 (0.323) Loss 0.5766 (0.5243) Prec@1 85.547 (82.955) | ||
224 | +Test: [0/2] Time 1.684 (1.684) Loss 0.6416 (0.6416) Prec@1 82.031 (82.031) | ||
225 | + * epoch: 28 Prec@1 82.583 | ||
226 | + * epoch: 28 Prec@1 82.583 | ||
227 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
228 | +Epoch: [29][0/12] Time 1.943 (1.943) Loss 0.5633 (0.5633) Prec@1 79.297 (79.297) | ||
229 | +Epoch: [29][10/12] Time 0.159 (0.321) Loss 0.4740 (0.4708) Prec@1 85.547 (84.730) | ||
230 | +Test: [0/2] Time 1.704 (1.704) Loss 0.6950 (0.6950) Prec@1 78.125 (78.125) | ||
231 | + * epoch: 29 Prec@1 78.378 | ||
232 | + * epoch: 29 Prec@1 78.378 | ||
233 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
234 | +Epoch: [30][0/12] Time 1.953 (1.953) Loss 0.6137 (0.6137) Prec@1 82.422 (82.422) | ||
235 | +Epoch: [30][10/12] Time 0.159 (0.322) Loss 0.3596 (0.5028) Prec@1 87.891 (84.553) | ||
236 | +Test: [0/2] Time 1.699 (1.699) Loss 0.6546 (0.6546) Prec@1 82.031 (82.031) | ||
237 | + * epoch: 30 Prec@1 83.483 | ||
238 | + * epoch: 30 Prec@1 83.483 | ||
239 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
240 | +Epoch: [31][0/12] Time 2.174 (2.174) Loss 0.4846 (0.4846) Prec@1 83.984 (83.984) | ||
241 | +Epoch: [31][10/12] Time 0.160 (0.343) Loss 0.4474 (0.4548) Prec@1 84.375 (85.050) | ||
242 | +Test: [0/2] Time 1.678 (1.678) Loss 0.6281 (0.6281) Prec@1 84.375 (84.375) | ||
243 | + * epoch: 31 Prec@1 84.084 | ||
244 | + * epoch: 31 Prec@1 84.084 | ||
245 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
246 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
247 | +Epoch: [32][0/12] Time 1.929 (1.929) Loss 0.4780 (0.4780) Prec@1 83.594 (83.594) | ||
248 | +Epoch: [32][10/12] Time 0.160 (0.320) Loss 0.4670 (0.5008) Prec@1 85.938 (83.842) | ||
249 | +Test: [0/2] Time 1.698 (1.698) Loss 0.5501 (0.5501) Prec@1 82.422 (82.422) | ||
250 | + * epoch: 32 Prec@1 82.583 | ||
251 | + * epoch: 32 Prec@1 82.583 | ||
252 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
253 | +Epoch: [33][0/12] Time 2.224 (2.224) Loss 0.4372 (0.4372) Prec@1 89.062 (89.062) | ||
254 | +Epoch: [33][10/12] Time 0.159 (0.347) Loss 0.4002 (0.4672) Prec@1 87.500 (85.369) | ||
255 | +Test: [0/2] Time 1.650 (1.650) Loss 0.9937 (0.9937) Prec@1 65.234 (65.234) | ||
256 | + * epoch: 33 Prec@1 65.165 | ||
257 | + * epoch: 33 Prec@1 65.165 | ||
258 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
259 | +Epoch: [34][0/12] Time 2.233 (2.233) Loss 0.5080 (0.5080) Prec@1 82.031 (82.031) | ||
260 | +Epoch: [34][10/12] Time 0.159 (0.347) Loss 0.4675 (0.4632) Prec@1 84.766 (84.908) | ||
261 | +Test: [0/2] Time 1.673 (1.673) Loss 0.6285 (0.6285) Prec@1 83.984 (83.984) | ||
262 | + * epoch: 34 Prec@1 84.084 | ||
263 | + * epoch: 34 Prec@1 84.084 | ||
264 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
265 | +Epoch: [35][0/12] Time 1.915 (1.915) Loss 0.4910 (0.4910) Prec@1 86.328 (86.328) | ||
266 | +Epoch: [35][10/12] Time 0.159 (0.320) Loss 0.3640 (0.4289) Prec@1 88.281 (86.222) | ||
267 | +Test: [0/2] Time 1.725 (1.725) Loss 0.9229 (0.9229) Prec@1 80.859 (80.859) | ||
268 | + * epoch: 35 Prec@1 80.180 | ||
269 | + * epoch: 35 Prec@1 80.180 | ||
270 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
271 | +Epoch: [36][0/12] Time 1.938 (1.938) Loss 0.5173 (0.5173) Prec@1 81.641 (81.641) | ||
272 | +Epoch: [36][10/12] Time 0.159 (0.321) Loss 0.5336 (0.4753) Prec@1 83.203 (84.553) | ||
273 | +Test: [0/2] Time 1.725 (1.725) Loss 0.5637 (0.5637) Prec@1 87.109 (87.109) | ||
274 | + * epoch: 36 Prec@1 86.186 | ||
275 | + * epoch: 36 Prec@1 86.186 | ||
276 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
277 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
278 | +Epoch: [37][0/12] Time 2.224 (2.224) Loss 0.3901 (0.3901) Prec@1 86.719 (86.719) | ||
279 | +Epoch: [37][10/12] Time 0.159 (0.347) Loss 0.4682 (0.4061) Prec@1 82.422 (86.648) | ||
280 | +Test: [0/2] Time 1.686 (1.686) Loss 0.9415 (0.9415) Prec@1 74.219 (74.219) | ||
281 | + * epoch: 37 Prec@1 74.474 | ||
282 | + * epoch: 37 Prec@1 74.474 | ||
283 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
284 | +Epoch: [38][0/12] Time 1.960 (1.960) Loss 0.4797 (0.4797) Prec@1 84.375 (84.375) | ||
285 | +Epoch: [38][10/12] Time 0.159 (0.324) Loss 0.4020 (0.4410) Prec@1 88.672 (85.653) | ||
286 | +Test: [0/2] Time 1.692 (1.692) Loss 0.4875 (0.4875) Prec@1 84.375 (84.375) | ||
287 | + * epoch: 38 Prec@1 83.483 | ||
288 | + * epoch: 38 Prec@1 83.483 | ||
289 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
290 | +Epoch: [39][0/12] Time 1.931 (1.931) Loss 0.4579 (0.4579) Prec@1 85.156 (85.156) | ||
291 | +Epoch: [39][10/12] Time 0.159 (0.330) Loss 0.3570 (0.3953) Prec@1 87.500 (86.754) | ||
292 | +Test: [0/2] Time 1.673 (1.673) Loss 0.6890 (0.6890) Prec@1 84.766 (84.766) | ||
293 | + * epoch: 39 Prec@1 83.784 | ||
294 | + * epoch: 39 Prec@1 83.784 | ||
295 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
296 | +Epoch: [40][0/12] Time 1.908 (1.908) Loss 0.5327 (0.5327) Prec@1 82.031 (82.031) | ||
297 | +Epoch: [40][10/12] Time 0.160 (0.320) Loss 0.3981 (0.4445) Prec@1 88.281 (85.724) | ||
298 | +Test: [0/2] Time 1.706 (1.706) Loss 0.4213 (0.4213) Prec@1 88.672 (88.672) | ||
299 | + * epoch: 40 Prec@1 87.387 | ||
300 | + * epoch: 40 Prec@1 87.387 | ||
301 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
302 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
303 | +Epoch: [41][0/12] Time 1.924 (1.924) Loss 0.4044 (0.4044) Prec@1 85.938 (85.938) | ||
304 | +Epoch: [41][10/12] Time 0.160 (0.318) Loss 0.4958 (0.3959) Prec@1 86.719 (86.861) | ||
305 | +Test: [0/2] Time 1.699 (1.699) Loss 0.7511 (0.7511) Prec@1 76.172 (76.172) | ||
306 | + * epoch: 41 Prec@1 74.174 | ||
307 | + * epoch: 41 Prec@1 74.174 | ||
308 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
309 | +Epoch: [42][0/12] Time 2.204 (2.204) Loss 0.4268 (0.4268) Prec@1 88.281 (88.281) | ||
310 | +Epoch: [42][10/12] Time 0.158 (0.345) Loss 0.4259 (0.4605) Prec@1 87.109 (84.979) | ||
311 | +Test: [0/2] Time 1.730 (1.730) Loss 0.6196 (0.6196) Prec@1 87.109 (87.109) | ||
312 | + * epoch: 42 Prec@1 87.387 | ||
313 | + * epoch: 42 Prec@1 87.387 | ||
314 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
315 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
316 | +Epoch: [43][0/12] Time 2.063 (2.063) Loss 0.4617 (0.4617) Prec@1 84.375 (84.375) | ||
317 | +Epoch: [43][10/12] Time 0.159 (0.332) Loss 0.4178 (0.3862) Prec@1 87.891 (87.287) | ||
318 | +Test: [0/2] Time 1.692 (1.692) Loss 0.7116 (0.7116) Prec@1 78.516 (78.516) | ||
319 | + * epoch: 43 Prec@1 78.378 | ||
320 | + * epoch: 43 Prec@1 78.378 | ||
321 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
322 | +Epoch: [44][0/12] Time 1.968 (1.968) Loss 0.6055 (0.6055) Prec@1 76.953 (76.953) | ||
323 | +Epoch: [44][10/12] Time 0.160 (0.331) Loss 0.4583 (0.4649) Prec@1 85.156 (83.487) | ||
324 | +Test: [0/2] Time 1.690 (1.690) Loss 0.4601 (0.4601) Prec@1 87.109 (87.109) | ||
325 | + * epoch: 44 Prec@1 86.787 | ||
326 | + * epoch: 44 Prec@1 86.787 | ||
327 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
328 | +Epoch: [45][0/12] Time 1.908 (1.908) Loss 0.4238 (0.4238) Prec@1 87.109 (87.109) | ||
329 | +Epoch: [45][10/12] Time 0.159 (0.318) Loss 0.5030 (0.3826) Prec@1 82.812 (87.074) | ||
330 | +Test: [0/2] Time 1.707 (1.707) Loss 0.7285 (0.7285) Prec@1 79.688 (79.688) | ||
331 | + * epoch: 45 Prec@1 80.781 | ||
332 | + * epoch: 45 Prec@1 80.781 | ||
333 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
334 | +Epoch: [46][0/12] Time 1.955 (1.955) Loss 0.5765 (0.5765) Prec@1 81.250 (81.250) | ||
335 | +Epoch: [46][10/12] Time 0.159 (0.321) Loss 0.3067 (0.4130) Prec@1 88.672 (86.009) | ||
336 | +Test: [0/2] Time 1.686 (1.686) Loss 0.4962 (0.4962) Prec@1 89.062 (89.062) | ||
337 | + * epoch: 46 Prec@1 88.288 | ||
338 | + * epoch: 46 Prec@1 88.288 | ||
339 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
340 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
341 | +Epoch: [47][0/12] Time 1.986 (1.986) Loss 0.3760 (0.3760) Prec@1 87.891 (87.891) | ||
342 | +Epoch: [47][10/12] Time 0.159 (0.335) Loss 0.3775 (0.3699) Prec@1 84.766 (87.216) | ||
343 | +Test: [0/2] Time 1.697 (1.697) Loss 0.5537 (0.5537) Prec@1 85.938 (85.938) | ||
344 | + * epoch: 47 Prec@1 85.886 | ||
345 | + * epoch: 47 Prec@1 85.886 | ||
346 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
347 | +Epoch: [48][0/12] Time 1.951 (1.951) Loss 0.3737 (0.3737) Prec@1 87.500 (87.500) | ||
348 | +Epoch: [48][10/12] Time 0.159 (0.322) Loss 0.3257 (0.3962) Prec@1 88.672 (87.145) | ||
349 | +Test: [0/2] Time 1.693 (1.693) Loss 0.5342 (0.5342) Prec@1 87.109 (87.109) | ||
350 | + * epoch: 48 Prec@1 85.586 | ||
351 | + * epoch: 48 Prec@1 85.586 | ||
352 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
353 | +Epoch: [49][0/12] Time 1.991 (1.991) Loss 0.3158 (0.3158) Prec@1 88.672 (88.672) | ||
354 | +Epoch: [49][10/12] Time 0.159 (0.326) Loss 0.4068 (0.3745) Prec@1 87.109 (87.571) | ||
355 | +Test: [0/2] Time 1.684 (1.684) Loss 0.5855 (0.5855) Prec@1 81.641 (81.641) | ||
356 | + * epoch: 49 Prec@1 80.781 | ||
357 | + * epoch: 49 Prec@1 80.781 | ||
358 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
359 | +Epoch: [50][0/12] Time 1.904 (1.904) Loss 0.6005 (0.6005) Prec@1 79.297 (79.297) | ||
360 | +Epoch: [50][10/12] Time 0.159 (0.319) Loss 0.3856 (0.3787) Prec@1 86.328 (87.464) | ||
361 | +Test: [0/2] Time 1.680 (1.680) Loss 0.6797 (0.6797) Prec@1 77.344 (77.344) | ||
362 | + * epoch: 50 Prec@1 78.979 | ||
363 | + * epoch: 50 Prec@1 78.979 | ||
364 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
365 | +Epoch: [51][0/12] Time 2.151 (2.151) Loss 0.3381 (0.3381) Prec@1 89.844 (89.844) | ||
366 | +Epoch: [51][10/12] Time 0.157 (0.340) Loss 0.3905 (0.3525) Prec@1 83.594 (88.352) | ||
367 | +Test: [0/2] Time 1.705 (1.705) Loss 0.5561 (0.5561) Prec@1 86.328 (86.328) | ||
368 | + * epoch: 51 Prec@1 85.886 | ||
369 | + * epoch: 51 Prec@1 85.886 | ||
370 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
371 | +Epoch: [52][0/12] Time 1.977 (1.977) Loss 0.3419 (0.3419) Prec@1 88.281 (88.281) | ||
372 | +Epoch: [52][10/12] Time 0.159 (0.325) Loss 0.3898 (0.3460) Prec@1 88.281 (88.565) | ||
373 | +Test: [0/2] Time 1.704 (1.704) Loss 0.4699 (0.4699) Prec@1 86.328 (86.328) | ||
374 | + * epoch: 52 Prec@1 85.285 | ||
375 | + * epoch: 52 Prec@1 85.285 | ||
376 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
377 | +Epoch: [53][0/12] Time 1.954 (1.954) Loss 0.2477 (0.2477) Prec@1 88.672 (88.672) | ||
378 | +Epoch: [53][10/12] Time 0.159 (0.330) Loss 0.3368 (0.3348) Prec@1 88.672 (88.920) | ||
379 | +Test: [0/2] Time 1.672 (1.672) Loss 0.9721 (0.9721) Prec@1 60.547 (60.547) | ||
380 | + * epoch: 53 Prec@1 61.261 | ||
381 | + * epoch: 53 Prec@1 61.261 | ||
382 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
383 | +Epoch: [54][0/12] Time 2.249 (2.249) Loss 0.4255 (0.4255) Prec@1 82.031 (82.031) | ||
384 | +Epoch: [54][10/12] Time 0.160 (0.349) Loss 0.3790 (0.3706) Prec@1 85.547 (86.506) | ||
385 | +Test: [0/2] Time 1.664 (1.664) Loss 0.4914 (0.4914) Prec@1 88.281 (88.281) | ||
386 | + * epoch: 54 Prec@1 88.889 | ||
387 | + * epoch: 54 Prec@1 88.889 | ||
388 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
389 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
390 | +Epoch: [55][0/12] Time 1.919 (1.919) Loss 0.3694 (0.3694) Prec@1 85.547 (85.547) | ||
391 | +Epoch: [55][10/12] Time 0.159 (0.319) Loss 0.3774 (0.3204) Prec@1 88.672 (89.240) | ||
392 | +Test: [0/2] Time 1.689 (1.689) Loss 0.7353 (0.7353) Prec@1 80.078 (80.078) | ||
393 | + * epoch: 55 Prec@1 79.880 | ||
394 | + * epoch: 55 Prec@1 79.880 | ||
395 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
396 | +Epoch: [56][0/12] Time 1.947 (1.947) Loss 0.4603 (0.4603) Prec@1 83.594 (83.594) | ||
397 | +Epoch: [56][10/12] Time 0.159 (0.322) Loss 0.3666 (0.3867) Prec@1 87.891 (86.932) | ||
398 | +Test: [0/2] Time 1.722 (1.722) Loss 0.4614 (0.4614) Prec@1 86.719 (86.719) | ||
399 | + * epoch: 56 Prec@1 86.486 | ||
400 | + * epoch: 56 Prec@1 86.486 | ||
401 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
402 | +Epoch: [57][0/12] Time 1.980 (1.980) Loss 0.3089 (0.3089) Prec@1 89.844 (89.844) | ||
403 | +Epoch: [57][10/12] Time 0.160 (0.323) Loss 0.2758 (0.3167) Prec@1 90.234 (89.027) | ||
404 | +Test: [0/2] Time 1.713 (1.713) Loss 0.7926 (0.7926) Prec@1 69.141 (69.141) | ||
405 | + * epoch: 57 Prec@1 68.769 | ||
406 | + * epoch: 57 Prec@1 68.769 | ||
407 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
408 | +Epoch: [58][0/12] Time 1.897 (1.897) Loss 0.3536 (0.3536) Prec@1 86.328 (86.328) | ||
409 | +Epoch: [58][10/12] Time 0.160 (0.318) Loss 0.3304 (0.3711) Prec@1 91.406 (87.109) | ||
410 | +Test: [0/2] Time 1.722 (1.722) Loss 0.4612 (0.4612) Prec@1 89.062 (89.062) | ||
411 | + * epoch: 58 Prec@1 89.790 | ||
412 | + * epoch: 58 Prec@1 89.790 | ||
413 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
414 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
415 | +Epoch: [59][0/12] Time 2.226 (2.226) Loss 0.2936 (0.2936) Prec@1 91.406 (91.406) | ||
416 | +Epoch: [59][10/12] Time 0.159 (0.348) Loss 0.3097 (0.3106) Prec@1 87.500 (89.560) | ||
417 | +Test: [0/2] Time 1.691 (1.691) Loss 0.5900 (0.5900) Prec@1 83.984 (83.984) | ||
418 | + * epoch: 59 Prec@1 85.285 | ||
419 | + * epoch: 59 Prec@1 85.285 | ||
420 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
421 | +Epoch: [60][0/12] Time 2.012 (2.012) Loss 0.2896 (0.2896) Prec@1 90.234 (90.234) | ||
422 | +Epoch: [60][10/12] Time 0.158 (0.326) Loss 0.3392 (0.3331) Prec@1 87.891 (88.246) | ||
423 | +Test: [0/2] Time 1.725 (1.725) Loss 0.5262 (0.5262) Prec@1 89.453 (89.453) | ||
424 | + * epoch: 60 Prec@1 88.889 | ||
425 | + * epoch: 60 Prec@1 88.889 | ||
426 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
427 | +Epoch: [61][0/12] Time 1.936 (1.936) Loss 0.3321 (0.3321) Prec@1 88.281 (88.281) | ||
428 | +Epoch: [61][10/12] Time 0.159 (0.320) Loss 0.3931 (0.3128) Prec@1 87.891 (89.347) | ||
429 | +Test: [0/2] Time 1.672 (1.672) Loss 0.8900 (0.8900) Prec@1 57.812 (57.812) | ||
430 | + * epoch: 61 Prec@1 58.258 | ||
431 | + * epoch: 61 Prec@1 58.258 | ||
432 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
433 | +Epoch: [62][0/12] Time 1.955 (1.955) Loss 0.3191 (0.3191) Prec@1 89.844 (89.844) | ||
434 | +Epoch: [62][10/12] Time 0.154 (0.322) Loss 0.3570 (0.3385) Prec@1 88.281 (88.636) | ||
435 | +Test: [0/2] Time 1.709 (1.709) Loss 0.4341 (0.4341) Prec@1 86.719 (86.719) | ||
436 | + * epoch: 62 Prec@1 86.486 | ||
437 | + * epoch: 62 Prec@1 86.486 | ||
438 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
439 | +Epoch: [63][0/12] Time 1.930 (1.930) Loss 0.2078 (0.2078) Prec@1 92.188 (92.188) | ||
440 | +Epoch: [63][10/12] Time 0.159 (0.320) Loss 0.3926 (0.2808) Prec@1 88.281 (90.376) | ||
441 | +Test: [0/2] Time 1.681 (1.681) Loss 0.5782 (0.5782) Prec@1 71.484 (71.484) | ||
442 | + * epoch: 63 Prec@1 70.270 | ||
443 | + * epoch: 63 Prec@1 70.270 | ||
444 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
445 | +Epoch: [64][0/12] Time 2.252 (2.252) Loss 0.3744 (0.3744) Prec@1 87.891 (87.891) | ||
446 | +Epoch: [64][10/12] Time 0.158 (0.349) Loss 0.3298 (0.3217) Prec@1 88.672 (89.276) | ||
447 | +Test: [0/2] Time 1.717 (1.717) Loss 0.4983 (0.4983) Prec@1 89.062 (89.062) | ||
448 | + * epoch: 64 Prec@1 89.489 | ||
449 | + * epoch: 64 Prec@1 89.489 | ||
450 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
451 | +Epoch: [65][0/12] Time 1.913 (1.913) Loss 0.3845 (0.3845) Prec@1 87.891 (87.891) | ||
452 | +Epoch: [65][10/12] Time 0.161 (0.332) Loss 0.2949 (0.3003) Prec@1 89.062 (89.950) | ||
453 | +Test: [0/2] Time 1.693 (1.693) Loss 0.5655 (0.5655) Prec@1 86.328 (86.328) | ||
454 | + * epoch: 65 Prec@1 87.087 | ||
455 | + * epoch: 65 Prec@1 87.087 | ||
456 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
457 | +Epoch: [66][0/12] Time 1.949 (1.949) Loss 0.4444 (0.4444) Prec@1 87.109 (87.109) | ||
458 | +Epoch: [66][10/12] Time 0.159 (0.322) Loss 0.3399 (0.3130) Prec@1 89.062 (89.098) | ||
459 | +Test: [0/2] Time 1.703 (1.703) Loss 0.5085 (0.5085) Prec@1 89.844 (89.844) | ||
460 | + * epoch: 66 Prec@1 87.688 | ||
461 | + * epoch: 66 Prec@1 87.688 | ||
462 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
463 | +Epoch: [67][0/12] Time 1.946 (1.946) Loss 0.2375 (0.2375) Prec@1 91.016 (91.016) | ||
464 | +Epoch: [67][10/12] Time 0.159 (0.328) Loss 0.3387 (0.2769) Prec@1 85.547 (90.128) | ||
465 | +Test: [0/2] Time 1.703 (1.703) Loss 0.5086 (0.5086) Prec@1 88.281 (88.281) | ||
466 | + * epoch: 67 Prec@1 87.387 | ||
467 | + * epoch: 67 Prec@1 87.387 | ||
468 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
469 | +Epoch: [68][0/12] Time 1.972 (1.972) Loss 0.3379 (0.3379) Prec@1 90.234 (90.234) | ||
470 | +Epoch: [68][10/12] Time 0.159 (0.324) Loss 0.2454 (0.3145) Prec@1 91.406 (88.672) | ||
471 | +Test: [0/2] Time 1.719 (1.719) Loss 0.6045 (0.6045) Prec@1 87.500 (87.500) | ||
472 | + * epoch: 68 Prec@1 88.589 | ||
473 | + * epoch: 68 Prec@1 88.589 | ||
474 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
475 | +Epoch: [69][0/12] Time 1.947 (1.947) Loss 0.2363 (0.2363) Prec@1 90.625 (90.625) | ||
476 | +Epoch: [69][10/12] Time 0.159 (0.321) Loss 0.2486 (0.2593) Prec@1 90.234 (90.874) | ||
477 | +Test: [0/2] Time 1.710 (1.710) Loss 0.5298 (0.5298) Prec@1 80.078 (80.078) | ||
478 | + * epoch: 69 Prec@1 81.381 | ||
479 | + * epoch: 69 Prec@1 81.381 | ||
480 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
481 | +Epoch: [70][0/12] Time 1.949 (1.949) Loss 0.2916 (0.2916) Prec@1 89.453 (89.453) | ||
482 | +Epoch: [70][10/12] Time 0.158 (0.328) Loss 0.2786 (0.2966) Prec@1 89.453 (89.737) | ||
483 | +Test: [0/2] Time 1.683 (1.683) Loss 0.4976 (0.4976) Prec@1 87.500 (87.500) | ||
484 | + * epoch: 70 Prec@1 88.288 | ||
485 | + * epoch: 70 Prec@1 88.288 | ||
486 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
487 | +Epoch: [71][0/12] Time 1.928 (1.928) Loss 0.1949 (0.1949) Prec@1 94.141 (94.141) | ||
488 | +Epoch: [71][10/12] Time 0.159 (0.330) Loss 0.1884 (0.2445) Prec@1 93.359 (90.696) | ||
489 | +Test: [0/2] Time 1.684 (1.684) Loss 0.5251 (0.5251) Prec@1 82.812 (82.812) | ||
490 | + * epoch: 71 Prec@1 81.081 | ||
491 | + * epoch: 71 Prec@1 81.081 | ||
492 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
493 | +Epoch: [72][0/12] Time 1.930 (1.930) Loss 0.3282 (0.3282) Prec@1 87.500 (87.500) | ||
494 | +Epoch: [72][10/12] Time 0.159 (0.320) Loss 0.3842 (0.3169) Prec@1 86.719 (89.666) | ||
495 | +Test: [0/2] Time 1.709 (1.709) Loss 0.6996 (0.6996) Prec@1 89.453 (89.453) | ||
496 | + * epoch: 72 Prec@1 90.390 | ||
497 | + * epoch: 72 Prec@1 90.390 | ||
498 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
499 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
500 | +Epoch: [73][0/12] Time 2.187 (2.187) Loss 0.3168 (0.3168) Prec@1 87.500 (87.500) | ||
501 | +Epoch: [73][10/12] Time 0.159 (0.343) Loss 0.3492 (0.2848) Prec@1 85.938 (89.986) | ||
502 | +Test: [0/2] Time 1.708 (1.708) Loss 0.9264 (0.9264) Prec@1 85.547 (85.547) | ||
503 | + * epoch: 73 Prec@1 86.787 | ||
504 | + * epoch: 73 Prec@1 86.787 | ||
505 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
506 | +Epoch: [74][0/12] Time 1.913 (1.913) Loss 0.3152 (0.3152) Prec@1 88.281 (88.281) | ||
507 | +Epoch: [74][10/12] Time 0.159 (0.319) Loss 0.2675 (0.2727) Prec@1 92.578 (90.518) | ||
508 | +Test: [0/2] Time 1.720 (1.720) Loss 0.5490 (0.5490) Prec@1 86.328 (86.328) | ||
509 | + * epoch: 74 Prec@1 86.186 | ||
510 | + * epoch: 74 Prec@1 86.186 | ||
511 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
512 | +Epoch: [75][0/12] Time 1.922 (1.922) Loss 0.2301 (0.2301) Prec@1 90.625 (90.625) | ||
513 | +Epoch: [75][10/12] Time 0.159 (0.320) Loss 0.3265 (0.2525) Prec@1 87.109 (91.229) | ||
514 | +Test: [0/2] Time 1.699 (1.699) Loss 0.6857 (0.6857) Prec@1 82.812 (82.812) | ||
515 | + * epoch: 75 Prec@1 82.282 | ||
516 | + * epoch: 75 Prec@1 82.282 | ||
517 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
518 | +Epoch: [76][0/12] Time 2.240 (2.240) Loss 0.2848 (0.2848) Prec@1 87.500 (87.500) | ||
519 | +Epoch: [76][10/12] Time 0.160 (0.348) Loss 0.3408 (0.3049) Prec@1 87.500 (89.560) | ||
520 | +Test: [0/2] Time 1.707 (1.707) Loss 0.5952 (0.5952) Prec@1 81.641 (81.641) | ||
521 | + * epoch: 76 Prec@1 81.682 | ||
522 | + * epoch: 76 Prec@1 81.682 | ||
523 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
524 | +Epoch: [77][0/12] Time 1.904 (1.904) Loss 0.2558 (0.2558) Prec@1 92.578 (92.578) | ||
525 | +Epoch: [77][10/12] Time 0.159 (0.324) Loss 0.3256 (0.2529) Prec@1 87.500 (91.229) | ||
526 | +Test: [0/2] Time 1.708 (1.708) Loss 0.5739 (0.5739) Prec@1 85.156 (85.156) | ||
527 | + * epoch: 77 Prec@1 82.883 | ||
528 | + * epoch: 77 Prec@1 82.883 | ||
529 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
530 | +Epoch: [78][0/12] Time 2.032 (2.032) Loss 0.2934 (0.2934) Prec@1 89.844 (89.844) | ||
531 | +Epoch: [78][10/12] Time 0.159 (0.330) Loss 0.2887 (0.2670) Prec@1 92.578 (91.300) | ||
532 | +Test: [0/2] Time 1.688 (1.688) Loss 0.5590 (0.5590) Prec@1 85.156 (85.156) | ||
533 | + * epoch: 78 Prec@1 86.186 | ||
534 | + * epoch: 78 Prec@1 86.186 | ||
535 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
536 | +Epoch: [79][0/12] Time 1.945 (1.945) Loss 0.2431 (0.2431) Prec@1 91.797 (91.797) | ||
537 | +Epoch: [79][10/12] Time 0.160 (0.332) Loss 0.3305 (0.2164) Prec@1 87.109 (92.543) | ||
538 | +Test: [0/2] Time 1.675 (1.675) Loss 0.7119 (0.7119) Prec@1 77.344 (77.344) | ||
539 | + * epoch: 79 Prec@1 76.877 | ||
540 | + * epoch: 79 Prec@1 76.877 | ||
541 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
542 | +Epoch: [80][0/12] Time 2.189 (2.189) Loss 0.2332 (0.2332) Prec@1 92.969 (92.969) | ||
543 | +Epoch: [80][10/12] Time 0.159 (0.343) Loss 0.2309 (0.2770) Prec@1 93.359 (91.335) | ||
544 | +Test: [0/2] Time 1.680 (1.680) Loss 0.3864 (0.3864) Prec@1 90.234 (90.234) | ||
545 | + * epoch: 80 Prec@1 87.688 | ||
546 | + * epoch: 80 Prec@1 87.688 | ||
547 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
548 | +Epoch: [81][0/12] Time 2.239 (2.239) Loss 0.2767 (0.2767) Prec@1 92.578 (92.578) | ||
549 | +Epoch: [81][10/12] Time 0.160 (0.348) Loss 0.3311 (0.2484) Prec@1 86.328 (91.335) | ||
550 | +Test: [0/2] Time 1.720 (1.720) Loss 0.6348 (0.6348) Prec@1 76.562 (76.562) | ||
551 | + * epoch: 81 Prec@1 76.276 | ||
552 | + * epoch: 81 Prec@1 76.276 | ||
553 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
554 | +Epoch: [82][0/12] Time 2.256 (2.256) Loss 0.1899 (0.1899) Prec@1 92.969 (92.969) | ||
555 | +Epoch: [82][10/12] Time 0.159 (0.349) Loss 0.2917 (0.2652) Prec@1 90.625 (91.442) | ||
556 | +Test: [0/2] Time 1.689 (1.689) Loss 0.5168 (0.5168) Prec@1 85.156 (85.156) | ||
557 | + * epoch: 82 Prec@1 85.886 | ||
558 | + * epoch: 82 Prec@1 85.886 | ||
559 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
560 | +Epoch: [83][0/12] Time 2.051 (2.051) Loss 0.1982 (0.1982) Prec@1 92.188 (92.188) | ||
561 | +Epoch: [83][10/12] Time 0.159 (0.332) Loss 0.2491 (0.2078) Prec@1 89.844 (92.543) | ||
562 | +Test: [0/2] Time 1.682 (1.682) Loss 0.6296 (0.6296) Prec@1 80.078 (80.078) | ||
563 | + * epoch: 83 Prec@1 77.177 | ||
564 | + * epoch: 83 Prec@1 77.177 | ||
565 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
566 | +Epoch: [84][0/12] Time 1.912 (1.912) Loss 0.1927 (0.1927) Prec@1 93.359 (93.359) | ||
567 | +Epoch: [84][10/12] Time 0.157 (0.318) Loss 0.1888 (0.2599) Prec@1 93.359 (91.406) | ||
568 | +Test: [0/2] Time 1.707 (1.707) Loss 0.5512 (0.5512) Prec@1 88.672 (88.672) | ||
569 | + * epoch: 84 Prec@1 89.790 | ||
570 | + * epoch: 84 Prec@1 89.790 | ||
571 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
572 | +Epoch: [85][0/12] Time 1.909 (1.909) Loss 0.1752 (0.1752) Prec@1 93.750 (93.750) | ||
573 | +Epoch: [85][10/12] Time 0.161 (0.319) Loss 0.2747 (0.2133) Prec@1 91.797 (92.614) | ||
574 | +Test: [0/2] Time 1.704 (1.704) Loss 0.5671 (0.5671) Prec@1 86.328 (86.328) | ||
575 | + * epoch: 85 Prec@1 87.387 | ||
576 | + * epoch: 85 Prec@1 87.387 | ||
577 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
578 | +Epoch: [86][0/12] Time 2.240 (2.240) Loss 0.2070 (0.2070) Prec@1 89.844 (89.844) | ||
579 | +Epoch: [86][10/12] Time 0.160 (0.348) Loss 0.2832 (0.2450) Prec@1 91.797 (91.513) | ||
580 | +Test: [0/2] Time 1.709 (1.709) Loss 0.5368 (0.5368) Prec@1 86.719 (86.719) | ||
581 | + * epoch: 86 Prec@1 85.586 | ||
582 | + * epoch: 86 Prec@1 85.586 | ||
583 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
584 | +Epoch: [87][0/12] Time 1.953 (1.953) Loss 0.1469 (0.1469) Prec@1 96.094 (96.094) | ||
585 | +Epoch: [87][10/12] Time 0.161 (0.329) Loss 0.1976 (0.2014) Prec@1 92.969 (93.466) | ||
586 | +Test: [0/2] Time 1.704 (1.704) Loss 0.6268 (0.6268) Prec@1 86.328 (86.328) | ||
587 | + * epoch: 87 Prec@1 88.288 | ||
588 | + * epoch: 87 Prec@1 88.288 | ||
589 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
590 | +Epoch: [88][0/12] Time 1.976 (1.976) Loss 0.1892 (0.1892) Prec@1 92.969 (92.969) | ||
591 | +Epoch: [88][10/12] Time 0.159 (0.323) Loss 0.1663 (0.2332) Prec@1 93.359 (91.442) | ||
592 | +Test: [0/2] Time 1.725 (1.725) Loss 0.4768 (0.4768) Prec@1 89.844 (89.844) | ||
593 | + * epoch: 88 Prec@1 89.489 | ||
594 | + * epoch: 88 Prec@1 89.489 | ||
595 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
596 | +Epoch: [89][0/12] Time 2.208 (2.208) Loss 0.1286 (0.1286) Prec@1 94.141 (94.141) | ||
597 | +Epoch: [89][10/12] Time 0.159 (0.345) Loss 0.1622 (0.1895) Prec@1 93.750 (92.862) | ||
598 | +Test: [0/2] Time 1.670 (1.670) Loss 0.7504 (0.7504) Prec@1 86.719 (86.719) | ||
599 | + * epoch: 89 Prec@1 88.288 | ||
600 | + * epoch: 89 Prec@1 88.288 | ||
601 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
602 | +Epoch: [90][0/12] Time 1.919 (1.919) Loss 0.1749 (0.1749) Prec@1 94.531 (94.531) | ||
603 | +Epoch: [90][10/12] Time 0.160 (0.319) Loss 0.3030 (0.2244) Prec@1 90.234 (92.472) | ||
604 | +Test: [0/2] Time 1.703 (1.703) Loss 0.6520 (0.6520) Prec@1 87.500 (87.500) | ||
605 | + * epoch: 90 Prec@1 89.489 | ||
606 | + * epoch: 90 Prec@1 89.489 | ||
607 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
608 | +Epoch: [91][0/12] Time 2.019 (2.019) Loss 0.1967 (0.1967) Prec@1 93.750 (93.750) | ||
609 | +Epoch: [91][10/12] Time 0.160 (0.328) Loss 0.2092 (0.1974) Prec@1 91.406 (93.146) | ||
610 | +Test: [0/2] Time 1.717 (1.717) Loss 0.5337 (0.5337) Prec@1 89.453 (89.453) | ||
611 | + * epoch: 91 Prec@1 89.489 | ||
612 | + * epoch: 91 Prec@1 89.489 | ||
613 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
614 | +Epoch: [92][0/12] Time 1.908 (1.908) Loss 0.1796 (0.1796) Prec@1 93.750 (93.750) | ||
615 | +Epoch: [92][10/12] Time 0.160 (0.320) Loss 0.2263 (0.2480) Prec@1 93.359 (91.868) | ||
616 | +Test: [0/2] Time 1.715 (1.715) Loss 0.4933 (0.4933) Prec@1 88.672 (88.672) | ||
617 | + * epoch: 92 Prec@1 89.189 | ||
618 | + * epoch: 92 Prec@1 89.189 | ||
619 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
620 | +Epoch: [93][0/12] Time 1.934 (1.934) Loss 0.1817 (0.1817) Prec@1 93.750 (93.750) | ||
621 | +Epoch: [93][10/12] Time 0.158 (0.320) Loss 0.2488 (0.2133) Prec@1 90.625 (92.330) | ||
622 | +Test: [0/2] Time 1.699 (1.699) Loss 0.7086 (0.7086) Prec@1 80.469 (80.469) | ||
623 | + * epoch: 93 Prec@1 79.279 | ||
624 | + * epoch: 93 Prec@1 79.279 | ||
625 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
626 | +Epoch: [94][0/12] Time 1.955 (1.955) Loss 0.2422 (0.2422) Prec@1 89.453 (89.453) | ||
627 | +Epoch: [94][10/12] Time 0.159 (0.322) Loss 0.1780 (0.2533) Prec@1 93.359 (90.518) | ||
628 | +Test: [0/2] Time 1.699 (1.699) Loss 0.6035 (0.6035) Prec@1 89.062 (89.062) | ||
629 | + * epoch: 94 Prec@1 89.189 | ||
630 | + * epoch: 94 Prec@1 89.189 | ||
631 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
632 | +Epoch: [95][0/12] Time 1.975 (1.975) Loss 0.2582 (0.2582) Prec@1 90.234 (90.234) | ||
633 | +Epoch: [95][10/12] Time 0.159 (0.323) Loss 0.2944 (0.2084) Prec@1 92.188 (92.791) | ||
634 | +Test: [0/2] Time 1.730 (1.730) Loss 1.0666 (1.0666) Prec@1 67.188 (67.188) | ||
635 | + * epoch: 95 Prec@1 66.967 | ||
636 | + * epoch: 95 Prec@1 66.967 | ||
637 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
638 | +Epoch: [96][0/12] Time 1.966 (1.966) Loss 0.1643 (0.1643) Prec@1 94.141 (94.141) | ||
639 | +Epoch: [96][10/12] Time 0.160 (0.331) Loss 0.2444 (0.2336) Prec@1 91.016 (92.294) | ||
640 | +Test: [0/2] Time 1.694 (1.694) Loss 0.5861 (0.5861) Prec@1 86.328 (86.328) | ||
641 | + * epoch: 96 Prec@1 86.486 | ||
642 | + * epoch: 96 Prec@1 86.486 | ||
643 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
644 | +Epoch: [97][0/12] Time 1.914 (1.914) Loss 0.1659 (0.1659) Prec@1 94.531 (94.531) | ||
645 | +Epoch: [97][10/12] Time 0.160 (0.330) Loss 0.2504 (0.1904) Prec@1 92.578 (93.466) | ||
646 | +Test: [0/2] Time 1.678 (1.678) Loss 0.6894 (0.6894) Prec@1 78.906 (78.906) | ||
647 | + * epoch: 97 Prec@1 80.480 | ||
648 | + * epoch: 97 Prec@1 80.480 | ||
649 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
650 | +Epoch: [98][0/12] Time 1.902 (1.902) Loss 0.2155 (0.2155) Prec@1 91.406 (91.406) | ||
651 | +Epoch: [98][10/12] Time 0.159 (0.317) Loss 0.2941 (0.2129) Prec@1 92.578 (92.401) | ||
652 | +Test: [0/2] Time 1.687 (1.687) Loss 0.3753 (0.3753) Prec@1 94.922 (94.922) | ||
653 | + * epoch: 98 Prec@1 93.093 | ||
654 | + * epoch: 98 Prec@1 93.093 | ||
655 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
656 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
657 | +Epoch: [99][0/12] Time 1.923 (1.923) Loss 0.1940 (0.1940) Prec@1 95.312 (95.312) | ||
658 | +Epoch: [99][10/12] Time 0.160 (0.321) Loss 0.1865 (0.1930) Prec@1 93.750 (93.537) | ||
659 | +Test: [0/2] Time 1.672 (1.672) Loss 0.5767 (0.5767) Prec@1 83.594 (83.594) | ||
660 | + * epoch: 99 Prec@1 83.483 | ||
661 | + * epoch: 99 Prec@1 83.483 | ||
662 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
663 | +Epoch: [100][0/12] Time 1.946 (1.946) Loss 0.3028 (0.3028) Prec@1 91.016 (91.016) | ||
664 | +Epoch: [100][10/12] Time 0.159 (0.323) Loss 0.2235 (0.2103) Prec@1 91.406 (92.969) | ||
665 | +Test: [0/2] Time 1.704 (1.704) Loss 0.5625 (0.5625) Prec@1 89.844 (89.844) | ||
666 | + * epoch: 100 Prec@1 88.889 | ||
667 | + * epoch: 100 Prec@1 88.889 | ||
668 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
669 | +Epoch: [101][0/12] Time 1.920 (1.920) Loss 0.3380 (0.3380) Prec@1 89.844 (89.844) | ||
670 | +Epoch: [101][10/12] Time 0.159 (0.320) Loss 0.1733 (0.1909) Prec@1 92.188 (93.679) | ||
671 | +Test: [0/2] Time 1.678 (1.678) Loss 0.6445 (0.6445) Prec@1 89.062 (89.062) | ||
672 | + * epoch: 101 Prec@1 88.589 | ||
673 | + * epoch: 101 Prec@1 88.589 | ||
674 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
675 | +Epoch: [102][0/12] Time 1.895 (1.895) Loss 0.2093 (0.2093) Prec@1 92.969 (92.969) | ||
676 | +Epoch: [102][10/12] Time 0.159 (0.317) Loss 0.2647 (0.2172) Prec@1 89.844 (92.791) | ||
677 | +Test: [0/2] Time 1.691 (1.691) Loss 0.4537 (0.4537) Prec@1 89.844 (89.844) | ||
678 | + * epoch: 102 Prec@1 90.390 | ||
679 | + * epoch: 102 Prec@1 90.390 | ||
680 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
681 | +Epoch: [103][0/12] Time 1.975 (1.975) Loss 0.1979 (0.1979) Prec@1 92.969 (92.969) | ||
682 | +Epoch: [103][10/12] Time 0.159 (0.324) Loss 0.2140 (0.1836) Prec@1 89.844 (93.217) | ||
683 | +Test: [0/2] Time 1.706 (1.706) Loss 0.7860 (0.7860) Prec@1 89.062 (89.062) | ||
684 | + * epoch: 103 Prec@1 90.090 | ||
685 | + * epoch: 103 Prec@1 90.090 | ||
686 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
687 | +Epoch: [104][0/12] Time 2.022 (2.022) Loss 0.2810 (0.2810) Prec@1 92.578 (92.578) | ||
688 | +Epoch: [104][10/12] Time 0.159 (0.330) Loss 0.2480 (0.2219) Prec@1 91.797 (92.081) | ||
689 | +Test: [0/2] Time 1.716 (1.716) Loss 0.6215 (0.6215) Prec@1 90.625 (90.625) | ||
690 | + * epoch: 104 Prec@1 91.592 | ||
691 | + * epoch: 104 Prec@1 91.592 | ||
692 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
693 | +Epoch: [105][0/12] Time 1.906 (1.906) Loss 0.1642 (0.1642) Prec@1 95.312 (95.312) | ||
694 | +Epoch: [105][10/12] Time 0.159 (0.319) Loss 0.1640 (0.1851) Prec@1 93.750 (93.928) | ||
695 | +Test: [0/2] Time 1.708 (1.708) Loss 0.5004 (0.5004) Prec@1 90.625 (90.625) | ||
696 | + * epoch: 105 Prec@1 90.390 | ||
697 | + * epoch: 105 Prec@1 90.390 | ||
698 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
699 | +Epoch: [106][0/12] Time 1.909 (1.909) Loss 0.1803 (0.1803) Prec@1 90.234 (90.234) | ||
700 | +Epoch: [106][10/12] Time 0.158 (0.319) Loss 0.1994 (0.2141) Prec@1 91.797 (92.223) | ||
701 | +Test: [0/2] Time 1.688 (1.688) Loss 0.4920 (0.4920) Prec@1 82.031 (82.031) | ||
702 | + * epoch: 106 Prec@1 79.580 | ||
703 | + * epoch: 106 Prec@1 79.580 | ||
704 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
705 | +Epoch: [107][0/12] Time 1.971 (1.971) Loss 0.1579 (0.1579) Prec@1 92.969 (92.969) | ||
706 | +Epoch: [107][10/12] Time 0.160 (0.330) Loss 0.1896 (0.1695) Prec@1 93.359 (93.786) | ||
707 | +Test: [0/2] Time 1.693 (1.693) Loss 0.6627 (0.6627) Prec@1 85.156 (85.156) | ||
708 | + * epoch: 107 Prec@1 85.285 | ||
709 | + * epoch: 107 Prec@1 85.285 | ||
710 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
711 | +Epoch: [108][0/12] Time 1.926 (1.926) Loss 0.2721 (0.2721) Prec@1 90.234 (90.234) | ||
712 | +Epoch: [108][10/12] Time 0.159 (0.334) Loss 0.1391 (0.1956) Prec@1 94.531 (93.395) | ||
713 | +Test: [0/2] Time 1.693 (1.693) Loss 0.6169 (0.6169) Prec@1 87.891 (87.891) | ||
714 | + * epoch: 108 Prec@1 88.889 | ||
715 | + * epoch: 108 Prec@1 88.889 | ||
716 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
717 | +Epoch: [109][0/12] Time 1.932 (1.932) Loss 0.1255 (0.1255) Prec@1 96.094 (96.094) | ||
718 | +Epoch: [109][10/12] Time 0.159 (0.318) Loss 0.1880 (0.1642) Prec@1 93.750 (94.567) | ||
719 | +Test: [0/2] Time 1.702 (1.702) Loss 0.6942 (0.6942) Prec@1 86.719 (86.719) | ||
720 | + * epoch: 109 Prec@1 88.288 | ||
721 | + * epoch: 109 Prec@1 88.288 | ||
722 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
723 | +Epoch: [110][0/12] Time 1.941 (1.941) Loss 0.1494 (0.1494) Prec@1 95.312 (95.312) | ||
724 | +Epoch: [110][10/12] Time 0.159 (0.320) Loss 0.2451 (0.2071) Prec@1 91.016 (92.330) | ||
725 | +Test: [0/2] Time 1.721 (1.721) Loss 0.5960 (0.5960) Prec@1 86.328 (86.328) | ||
726 | + * epoch: 110 Prec@1 86.486 | ||
727 | + * epoch: 110 Prec@1 86.486 | ||
728 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
729 | +Epoch: [111][0/12] Time 1.969 (1.969) Loss 0.1588 (0.1588) Prec@1 94.141 (94.141) | ||
730 | +Epoch: [111][10/12] Time 0.160 (0.323) Loss 0.1406 (0.1750) Prec@1 96.094 (93.395) | ||
731 | +Test: [0/2] Time 1.680 (1.680) Loss 0.6977 (0.6977) Prec@1 90.625 (90.625) | ||
732 | + * epoch: 111 Prec@1 90.090 | ||
733 | + * epoch: 111 Prec@1 90.090 | ||
734 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
735 | +Epoch: [112][0/12] Time 1.914 (1.914) Loss 0.2185 (0.2185) Prec@1 92.578 (92.578) | ||
736 | +Epoch: [112][10/12] Time 0.161 (0.318) Loss 0.1134 (0.1589) Prec@1 94.922 (94.389) | ||
737 | +Test: [0/2] Time 1.692 (1.692) Loss 0.6469 (0.6469) Prec@1 89.062 (89.062) | ||
738 | + * epoch: 112 Prec@1 89.489 | ||
739 | + * epoch: 112 Prec@1 89.489 | ||
740 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
741 | +Epoch: [113][0/12] Time 1.978 (1.978) Loss 0.1283 (0.1283) Prec@1 95.703 (95.703) | ||
742 | +Epoch: [113][10/12] Time 0.158 (0.323) Loss 0.2125 (0.1576) Prec@1 93.359 (94.815) | ||
743 | +Test: [0/2] Time 1.699 (1.699) Loss 0.9452 (0.9452) Prec@1 67.578 (67.578) | ||
744 | + * epoch: 113 Prec@1 65.766 | ||
745 | + * epoch: 113 Prec@1 65.766 | ||
746 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
747 | +Epoch: [114][0/12] Time 1.950 (1.950) Loss 0.2040 (0.2040) Prec@1 92.188 (92.188) | ||
748 | +Epoch: [114][10/12] Time 0.159 (0.322) Loss 0.2110 (0.1899) Prec@1 94.531 (93.253) | ||
749 | +Test: [0/2] Time 1.687 (1.687) Loss 0.5138 (0.5138) Prec@1 86.719 (86.719) | ||
750 | + * epoch: 114 Prec@1 86.486 | ||
751 | + * epoch: 114 Prec@1 86.486 | ||
752 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
753 | +Epoch: [115][0/12] Time 1.998 (1.998) Loss 0.1638 (0.1638) Prec@1 95.703 (95.703) | ||
754 | +Epoch: [115][10/12] Time 0.160 (0.331) Loss 0.2106 (0.1672) Prec@1 92.578 (94.354) | ||
755 | +Test: [0/2] Time 1.709 (1.709) Loss 0.8591 (0.8591) Prec@1 89.062 (89.062) | ||
756 | + * epoch: 115 Prec@1 89.790 | ||
757 | + * epoch: 115 Prec@1 89.790 | ||
758 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
759 | +Epoch: [116][0/12] Time 1.976 (1.976) Loss 0.1392 (0.1392) Prec@1 92.969 (92.969) | ||
760 | +Epoch: [116][10/12] Time 0.160 (0.336) Loss 0.2718 (0.2105) Prec@1 92.578 (93.253) | ||
761 | +Test: [0/2] Time 1.718 (1.718) Loss 0.4857 (0.4857) Prec@1 88.281 (88.281) | ||
762 | + * epoch: 116 Prec@1 88.589 | ||
763 | + * epoch: 116 Prec@1 88.589 | ||
764 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
765 | +Epoch: [117][0/12] Time 2.203 (2.203) Loss 0.1616 (0.1616) Prec@1 92.969 (92.969) | ||
766 | +Epoch: [117][10/12] Time 0.159 (0.345) Loss 0.1600 (0.1427) Prec@1 94.141 (94.638) | ||
767 | +Test: [0/2] Time 1.732 (1.732) Loss 0.8432 (0.8432) Prec@1 89.453 (89.453) | ||
768 | + * epoch: 117 Prec@1 90.691 | ||
769 | + * epoch: 117 Prec@1 90.691 | ||
770 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
771 | +Epoch: [118][0/12] Time 1.918 (1.918) Loss 0.1563 (0.1563) Prec@1 94.141 (94.141) | ||
772 | +Epoch: [118][10/12] Time 0.160 (0.319) Loss 0.1545 (0.1740) Prec@1 93.750 (93.892) | ||
773 | +Test: [0/2] Time 1.722 (1.722) Loss 0.4324 (0.4324) Prec@1 91.797 (91.797) | ||
774 | + * epoch: 118 Prec@1 90.991 | ||
775 | + * epoch: 118 Prec@1 90.991 | ||
776 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
777 | +Epoch: [119][0/12] Time 1.912 (1.912) Loss 0.1632 (0.1632) Prec@1 92.578 (92.578) | ||
778 | +Epoch: [119][10/12] Time 0.160 (0.317) Loss 0.1550 (0.1470) Prec@1 94.922 (94.638) | ||
779 | +Test: [0/2] Time 1.743 (1.743) Loss 0.5448 (0.5448) Prec@1 86.719 (86.719) | ||
780 | + * epoch: 119 Prec@1 85.886 | ||
781 | + * epoch: 119 Prec@1 85.886 | ||
782 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
783 | +Epoch: [120][0/12] Time 1.956 (1.956) Loss 0.1617 (0.1617) Prec@1 95.703 (95.703) | ||
784 | +Epoch: [120][10/12] Time 0.159 (0.322) Loss 0.1568 (0.1884) Prec@1 94.531 (93.466) | ||
785 | +Test: [0/2] Time 1.724 (1.724) Loss 0.4884 (0.4884) Prec@1 89.062 (89.062) | ||
786 | + * epoch: 120 Prec@1 89.189 | ||
787 | + * epoch: 120 Prec@1 89.189 | ||
788 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
789 | +Epoch: [121][0/12] Time 2.252 (2.252) Loss 0.0956 (0.0956) Prec@1 96.484 (96.484) | ||
790 | +Epoch: [121][10/12] Time 0.160 (0.350) Loss 0.0892 (0.1378) Prec@1 96.094 (94.425) | ||
791 | +Test: [0/2] Time 1.702 (1.702) Loss 0.9220 (0.9220) Prec@1 71.875 (71.875) | ||
792 | + * epoch: 121 Prec@1 72.372 | ||
793 | + * epoch: 121 Prec@1 72.372 | ||
794 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
795 | +Epoch: [122][0/12] Time 2.193 (2.193) Loss 0.1376 (0.1376) Prec@1 93.359 (93.359) | ||
796 | +Epoch: [122][10/12] Time 0.154 (0.344) Loss 0.1217 (0.1669) Prec@1 95.312 (94.034) | ||
797 | +Test: [0/2] Time 1.728 (1.728) Loss 0.4749 (0.4749) Prec@1 91.406 (91.406) | ||
798 | + * epoch: 122 Prec@1 90.090 | ||
799 | + * epoch: 122 Prec@1 90.090 | ||
800 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
801 | +Epoch: [123][0/12] Time 1.969 (1.969) Loss 0.1731 (0.1731) Prec@1 93.359 (93.359) | ||
802 | +Epoch: [123][10/12] Time 0.160 (0.332) Loss 0.1657 (0.1350) Prec@1 95.703 (95.419) | ||
803 | +Test: [0/2] Time 1.702 (1.702) Loss 0.4422 (0.4422) Prec@1 92.188 (92.188) | ||
804 | + * epoch: 123 Prec@1 90.991 | ||
805 | + * epoch: 123 Prec@1 90.991 | ||
806 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
807 | +Epoch: [124][0/12] Time 1.910 (1.910) Loss 0.1530 (0.1530) Prec@1 94.922 (94.922) | ||
808 | +Epoch: [124][10/12] Time 0.161 (0.335) Loss 0.1130 (0.1614) Prec@1 96.094 (94.744) | ||
809 | +Test: [0/2] Time 1.703 (1.703) Loss 0.6778 (0.6778) Prec@1 91.016 (91.016) | ||
810 | + * epoch: 124 Prec@1 91.592 | ||
811 | + * epoch: 124 Prec@1 91.592 | ||
812 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
813 | +Epoch: [125][0/12] Time 1.927 (1.927) Loss 0.1401 (0.1401) Prec@1 95.312 (95.312) | ||
814 | +Epoch: [125][10/12] Time 0.159 (0.334) Loss 0.1760 (0.1236) Prec@1 94.141 (95.774) | ||
815 | +Test: [0/2] Time 1.697 (1.697) Loss 0.8132 (0.8132) Prec@1 70.703 (70.703) | ||
816 | + * epoch: 125 Prec@1 72.072 | ||
817 | + * epoch: 125 Prec@1 72.072 | ||
818 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
819 | +Epoch: [126][0/12] Time 2.032 (2.032) Loss 0.1410 (0.1410) Prec@1 94.141 (94.141) | ||
820 | +Epoch: [126][10/12] Time 0.159 (0.330) Loss 0.1733 (0.1614) Prec@1 96.094 (93.928) | ||
821 | +Test: [0/2] Time 1.692 (1.692) Loss 0.4986 (0.4986) Prec@1 86.328 (86.328) | ||
822 | + * epoch: 126 Prec@1 87.688 | ||
823 | + * epoch: 126 Prec@1 87.688 | ||
824 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
825 | +Epoch: [127][0/12] Time 2.220 (2.220) Loss 0.1754 (0.1754) Prec@1 93.750 (93.750) | ||
826 | +Epoch: [127][10/12] Time 0.159 (0.346) Loss 0.1546 (0.1384) Prec@1 93.750 (94.922) | ||
827 | +Test: [0/2] Time 1.701 (1.701) Loss 0.9395 (0.9395) Prec@1 85.156 (85.156) | ||
828 | + * epoch: 127 Prec@1 86.787 | ||
829 | + * epoch: 127 Prec@1 86.787 | ||
830 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
831 | +Epoch: [128][0/12] Time 1.902 (1.902) Loss 0.1456 (0.1456) Prec@1 95.703 (95.703) | ||
832 | +Epoch: [128][10/12] Time 0.160 (0.322) Loss 0.1806 (0.1682) Prec@1 93.750 (94.496) | ||
833 | +Test: [0/2] Time 1.711 (1.711) Loss 0.5188 (0.5188) Prec@1 90.625 (90.625) | ||
834 | + * epoch: 128 Prec@1 90.691 | ||
835 | + * epoch: 128 Prec@1 90.691 | ||
836 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
837 | +Epoch: [129][0/12] Time 1.951 (1.951) Loss 0.0664 (0.0664) Prec@1 98.047 (98.047) | ||
838 | +Epoch: [129][10/12] Time 0.160 (0.323) Loss 0.1485 (0.1174) Prec@1 94.922 (96.058) | ||
839 | +Test: [0/2] Time 1.704 (1.704) Loss 0.6762 (0.6762) Prec@1 89.062 (89.062) | ||
840 | + * epoch: 129 Prec@1 90.090 | ||
841 | + * epoch: 129 Prec@1 90.090 | ||
842 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
843 | +Epoch: [130][0/12] Time 1.943 (1.943) Loss 0.2280 (0.2280) Prec@1 92.578 (92.578) | ||
844 | +Epoch: [130][10/12] Time 0.160 (0.332) Loss 0.2291 (0.1751) Prec@1 94.531 (93.786) | ||
845 | +Test: [0/2] Time 1.717 (1.717) Loss 0.4670 (0.4670) Prec@1 91.406 (91.406) | ||
846 | + * epoch: 130 Prec@1 91.892 | ||
847 | + * epoch: 130 Prec@1 91.892 | ||
848 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
849 | +Epoch: [131][0/12] Time 2.158 (2.158) Loss 0.1494 (0.1494) Prec@1 94.141 (94.141) | ||
850 | +Epoch: [131][10/12] Time 0.159 (0.341) Loss 0.1707 (0.1408) Prec@1 93.750 (94.744) | ||
851 | +Test: [0/2] Time 1.699 (1.699) Loss 0.4758 (0.4758) Prec@1 87.891 (87.891) | ||
852 | + * epoch: 131 Prec@1 87.387 | ||
853 | + * epoch: 131 Prec@1 87.387 | ||
854 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
855 | +Epoch: [132][0/12] Time 1.991 (1.991) Loss 0.1658 (0.1658) Prec@1 92.578 (92.578) | ||
856 | +Epoch: [132][10/12] Time 0.159 (0.326) Loss 0.1663 (0.1657) Prec@1 94.922 (94.212) | ||
857 | +Test: [0/2] Time 1.708 (1.708) Loss 0.5929 (0.5929) Prec@1 91.016 (91.016) | ||
858 | + * epoch: 132 Prec@1 91.892 | ||
859 | + * epoch: 132 Prec@1 91.892 | ||
860 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
861 | +Epoch: [133][0/12] Time 1.973 (1.973) Loss 0.0819 (0.0819) Prec@1 97.656 (97.656) | ||
862 | +Epoch: [133][10/12] Time 0.159 (0.324) Loss 0.1221 (0.1269) Prec@1 94.922 (95.135) | ||
863 | +Test: [0/2] Time 1.696 (1.696) Loss 0.4731 (0.4731) Prec@1 92.578 (92.578) | ||
864 | + * epoch: 133 Prec@1 92.793 | ||
865 | + * epoch: 133 Prec@1 92.793 | ||
866 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
867 | +Epoch: [134][0/12] Time 1.971 (1.971) Loss 0.0957 (0.0957) Prec@1 98.047 (98.047) | ||
868 | +Epoch: [134][10/12] Time 0.160 (0.324) Loss 0.1335 (0.1506) Prec@1 93.359 (94.567) | ||
869 | +Test: [0/2] Time 1.715 (1.715) Loss 0.5484 (0.5484) Prec@1 91.797 (91.797) | ||
870 | + * epoch: 134 Prec@1 92.192 | ||
871 | + * epoch: 134 Prec@1 92.192 | ||
872 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
873 | +Epoch: [135][0/12] Time 2.178 (2.178) Loss 0.1302 (0.1302) Prec@1 93.750 (93.750) | ||
874 | +Epoch: [135][10/12] Time 0.161 (0.342) Loss 0.1452 (0.1391) Prec@1 94.922 (94.922) | ||
875 | +Test: [0/2] Time 1.709 (1.709) Loss 0.7818 (0.7818) Prec@1 89.453 (89.453) | ||
876 | + * epoch: 135 Prec@1 89.489 | ||
877 | + * epoch: 135 Prec@1 89.489 | ||
878 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
879 | +Epoch: [136][0/12] Time 1.960 (1.960) Loss 0.1286 (0.1286) Prec@1 96.094 (96.094) | ||
880 | +Epoch: [136][10/12] Time 0.160 (0.323) Loss 0.0955 (0.1380) Prec@1 97.656 (94.709) | ||
881 | +Test: [0/2] Time 1.717 (1.717) Loss 0.5795 (0.5795) Prec@1 89.453 (89.453) | ||
882 | + * epoch: 136 Prec@1 90.090 | ||
883 | + * epoch: 136 Prec@1 90.090 | ||
884 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
885 | +Epoch: [137][0/12] Time 1.968 (1.968) Loss 0.1092 (0.1092) Prec@1 94.531 (94.531) | ||
886 | +Epoch: [137][10/12] Time 0.160 (0.324) Loss 0.2211 (0.1232) Prec@1 93.359 (95.526) |
1 | +Number of model parameters: 154706 | ||
2 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
4 | + * Prec@1 98.193 | ||
5 | + * Prec@1 98.193 | ||
6 | +Best accuracy: 98.19277163585984 | ||
7 | +[validate_2020-03-26-17-09-34] done | ||
8 | +[validate_2020-03-26-17-09-34] done | ||
9 | +start test using path : ../data/Fourth_data/demo | ||
10 | +Test start | ||
11 | +loading checkpoint... | ||
12 | +checkpoint already loaded! | ||
13 | +start test | ||
14 | +data path directory is ../data/Fourth_data/demo | ||
15 | +finish test | ||
16 | +set Type | ||
17 | +start test using path : ../data/Fourth_data/demo | ||
18 | +Test start | ||
19 | +loading checkpoint... | ||
20 | +checkpoint already loaded! | ||
21 | +start test | ||
22 | +data path directory is ../data/Fourth_data/demo | ||
23 | +finish test | ||
24 | +train start | ||
25 | +load yml file | ||
26 | +2020-03-26-17-10-28 | ||
27 | +use seed 825 | ||
28 | +use dataset : ../data/Fourth_data/All | ||
29 | +{'task': 'All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': '../data/Fourth_data/All', 'val': '../data/Fourth_data/All', 'test': '../data/Fourth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'weight': [2.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0], 'resume': '', 'augment': True, 'size': 224, 'confidence': False}, 'predict': {'batch-size': 256, 'worker': 64, 'cam-class': 'Crack', 'cam': False, 'normalize': True}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-26-17-10-28'} | ||
30 | +using normalize | ||
31 | +using no dropout | ||
32 | +using SGD | ||
33 | +Number of model parameters: 461559 | ||
34 | +Epoch: [0][0/12] Time 3.022 (3.022) Loss 1.9330 (1.9330) Prec@1 16.016 (16.016) | ||
35 | +Epoch: [0][10/12] Time 0.159 (0.411) Loss 1.6084 (1.7482) Prec@1 23.828 (17.898) | ||
36 | +Test: [0/2] Time 1.651 (1.651) Loss 1.8596 (1.8596) Prec@1 10.938 (10.938) | ||
37 | + * epoch: 0 Prec@1 12.012 | ||
38 | + * epoch: 0 Prec@1 12.012 | ||
39 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
40 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
41 | +Epoch: [1][0/12] Time 1.962 (1.962) Loss 1.5773 (1.5773) Prec@1 28.516 (28.516) | ||
42 | +Epoch: [1][10/12] Time 0.158 (0.323) Loss 1.6759 (1.5514) Prec@1 20.312 (26.136) | ||
43 | +Test: [0/2] Time 1.653 (1.653) Loss 1.8462 (1.8462) Prec@1 31.250 (31.250) | ||
44 | + * epoch: 1 Prec@1 30.030 | ||
45 | + * epoch: 1 Prec@1 30.030 | ||
46 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
47 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
48 | +Epoch: [2][0/12] Time 1.952 (1.952) Loss 1.5458 (1.5458) Prec@1 26.562 (26.562) | ||
49 | +Epoch: [2][10/12] Time 0.157 (0.331) Loss 1.2252 (1.3803) Prec@1 42.969 (31.889) | ||
50 | +Test: [0/2] Time 1.674 (1.674) Loss 1.7407 (1.7407) Prec@1 30.859 (30.859) | ||
51 | + * epoch: 2 Prec@1 32.733 | ||
52 | + * epoch: 2 Prec@1 32.733 | ||
53 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
54 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
55 | +Epoch: [3][0/12] Time 1.960 (1.960) Loss 1.2237 (1.2237) Prec@1 39.062 (39.062) | ||
56 | +Epoch: [3][10/12] Time 0.155 (0.323) Loss 1.1566 (1.2237) Prec@1 41.797 (41.193) | ||
57 | +Test: [0/2] Time 1.687 (1.687) Loss 1.7368 (1.7368) Prec@1 22.266 (22.266) | ||
58 | + * epoch: 3 Prec@1 22.823 | ||
59 | + * epoch: 3 Prec@1 22.823 | ||
60 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
61 | +Epoch: [4][0/12] Time 1.967 (1.967) Loss 1.3065 (1.3065) Prec@1 48.828 (48.828) | ||
62 | +Epoch: [4][10/12] Time 0.162 (0.335) Loss 1.0501 (1.1183) Prec@1 58.594 (53.906) | ||
63 | +Test: [0/2] Time 1.665 (1.665) Loss 1.1960 (1.1960) Prec@1 53.516 (53.516) | ||
64 | + * epoch: 4 Prec@1 52.553 | ||
65 | + * epoch: 4 Prec@1 52.553 | ||
66 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
67 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
68 | +Epoch: [5][0/12] Time 1.930 (1.930) Loss 1.0283 (1.0283) Prec@1 61.719 (61.719) | ||
69 | +Epoch: [5][10/12] Time 0.159 (0.320) Loss 1.0155 (1.0390) Prec@1 58.594 (60.227) | ||
70 | +Test: [0/2] Time 1.692 (1.692) Loss 1.2309 (1.2309) Prec@1 60.938 (60.938) | ||
71 | + * epoch: 5 Prec@1 60.360 | ||
72 | + * epoch: 5 Prec@1 60.360 | ||
73 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
74 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
75 | +Epoch: [6][0/12] Time 2.214 (2.214) Loss 0.9515 (0.9515) Prec@1 64.844 (64.844) | ||
76 | +Epoch: [6][10/12] Time 0.158 (0.346) Loss 0.7833 (0.9050) Prec@1 75.781 (68.999) | ||
77 | +Test: [0/2] Time 1.663 (1.663) Loss 0.9625 (0.9625) Prec@1 49.609 (49.609) | ||
78 | + * epoch: 6 Prec@1 49.850 | ||
79 | + * epoch: 6 Prec@1 49.850 | ||
80 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
81 | +Epoch: [7][0/12] Time 1.919 (1.919) Loss 0.7668 (0.7668) Prec@1 76.172 (76.172) | ||
82 | +Epoch: [7][10/12] Time 0.158 (0.319) Loss 0.9260 (0.8527) Prec@1 63.672 (71.768) | ||
83 | +Test: [0/2] Time 1.675 (1.675) Loss 1.1891 (1.1891) Prec@1 61.328 (61.328) | ||
84 | + * epoch: 7 Prec@1 58.859 | ||
85 | + * epoch: 7 Prec@1 58.859 | ||
86 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
87 | +Epoch: [8][0/12] Time 2.244 (2.244) Loss 0.8546 (0.8546) Prec@1 72.266 (72.266) | ||
88 | +Epoch: [8][10/12] Time 0.159 (0.348) Loss 0.7712 (0.8669) Prec@1 76.562 (71.094) | ||
89 | +Test: [0/2] Time 1.707 (1.707) Loss 0.9092 (0.9092) Prec@1 63.281 (63.281) | ||
90 | + * epoch: 8 Prec@1 62.462 | ||
91 | + * epoch: 8 Prec@1 62.462 | ||
92 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
93 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
94 | +Epoch: [9][0/12] Time 1.925 (1.925) Loss 0.7878 (0.7878) Prec@1 78.516 (78.516) | ||
95 | +Epoch: [9][10/12] Time 0.160 (0.325) Loss 0.7174 (0.7807) Prec@1 69.922 (73.509) | ||
96 | +Test: [0/2] Time 1.707 (1.707) Loss 0.9544 (0.9544) Prec@1 62.891 (62.891) | ||
97 | + * epoch: 9 Prec@1 60.661 | ||
98 | + * epoch: 9 Prec@1 60.661 | ||
99 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
100 | +Epoch: [10][0/12] Time 2.220 (2.220) Loss 0.9374 (0.9374) Prec@1 67.188 (67.188) | ||
101 | +Epoch: [10][10/12] Time 0.159 (0.347) Loss 0.7273 (0.7483) Prec@1 80.078 (75.497) | ||
102 | +Test: [0/2] Time 1.696 (1.696) Loss 0.9895 (0.9895) Prec@1 47.656 (47.656) | ||
103 | + * epoch: 10 Prec@1 48.649 | ||
104 | + * epoch: 10 Prec@1 48.649 | ||
105 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
106 | +Epoch: [11][0/12] Time 1.974 (1.974) Loss 0.6920 (0.6920) Prec@1 75.391 (75.391) | ||
107 | +Epoch: [11][10/12] Time 0.154 (0.324) Loss 0.7892 (0.6962) Prec@1 71.484 (75.781) | ||
108 | +Test: [0/2] Time 1.665 (1.665) Loss 1.1736 (1.1736) Prec@1 78.125 (78.125) | ||
109 | + * epoch: 11 Prec@1 77.778 | ||
110 | + * epoch: 11 Prec@1 77.778 | ||
111 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
112 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
113 | +Epoch: [12][0/12] Time 1.927 (1.927) Loss 0.7375 (0.7375) Prec@1 78.906 (78.906) | ||
114 | +Epoch: [12][10/12] Time 0.159 (0.328) Loss 0.6453 (0.6972) Prec@1 79.688 (77.308) | ||
115 | +Test: [0/2] Time 1.703 (1.703) Loss 0.7701 (0.7701) Prec@1 79.688 (79.688) | ||
116 | + * epoch: 12 Prec@1 79.279 | ||
117 | + * epoch: 12 Prec@1 79.279 | ||
118 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
119 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
120 | +Epoch: [13][0/12] Time 1.983 (1.983) Loss 0.6737 (0.6737) Prec@1 79.688 (79.688) | ||
121 | +Epoch: [13][10/12] Time 0.158 (0.325) Loss 0.6125 (0.6647) Prec@1 79.688 (78.942) | ||
122 | +Test: [0/2] Time 1.673 (1.673) Loss 1.7411 (1.7411) Prec@1 44.922 (44.922) | ||
123 | + * epoch: 13 Prec@1 44.745 | ||
124 | + * epoch: 13 Prec@1 44.745 | ||
125 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
126 | +Epoch: [14][0/12] Time 1.969 (1.969) Loss 0.7033 (0.7033) Prec@1 76.953 (76.953) | ||
127 | +Epoch: [14][10/12] Time 0.160 (0.328) Loss 0.7069 (0.6706) Prec@1 78.516 (78.196) | ||
128 | +Test: [0/2] Time 1.689 (1.689) Loss 0.7330 (0.7330) Prec@1 80.078 (80.078) | ||
129 | + * epoch: 14 Prec@1 80.781 | ||
130 | + * epoch: 14 Prec@1 80.781 | ||
131 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
132 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
133 | +Epoch: [15][0/12] Time 1.957 (1.957) Loss 0.6477 (0.6477) Prec@1 76.562 (76.562) | ||
134 | +Epoch: [15][10/12] Time 0.160 (0.323) Loss 0.6251 (0.6128) Prec@1 81.641 (80.007) | ||
135 | +Test: [0/2] Time 1.675 (1.675) Loss 0.7821 (0.7821) Prec@1 76.562 (76.562) | ||
136 | + * epoch: 15 Prec@1 76.877 | ||
137 | + * epoch: 15 Prec@1 76.877 | ||
138 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
139 | +Epoch: [16][0/12] Time 2.133 (2.133) Loss 0.6314 (0.6314) Prec@1 77.734 (77.734) | ||
140 | +Epoch: [16][10/12] Time 0.159 (0.338) Loss 0.6333 (0.6552) Prec@1 80.469 (79.190) | ||
141 | +Test: [0/2] Time 1.686 (1.686) Loss 0.8334 (0.8334) Prec@1 81.641 (81.641) | ||
142 | + * epoch: 16 Prec@1 81.982 | ||
143 | + * epoch: 16 Prec@1 81.982 | ||
144 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
145 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
146 | +Epoch: [17][0/12] Time 1.903 (1.903) Loss 0.5610 (0.5610) Prec@1 79.297 (79.297) | ||
147 | +Epoch: [17][10/12] Time 0.158 (0.324) Loss 0.5548 (0.5905) Prec@1 77.734 (81.001) | ||
148 | +Test: [0/2] Time 1.713 (1.713) Loss 0.7781 (0.7781) Prec@1 82.422 (82.422) | ||
149 | + * epoch: 17 Prec@1 81.381 | ||
150 | + * epoch: 17 Prec@1 81.381 | ||
151 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
152 | +Epoch: [18][0/12] Time 1.914 (1.914) Loss 0.5484 (0.5484) Prec@1 77.344 (77.344) | ||
153 | +Epoch: [18][10/12] Time 0.159 (0.320) Loss 0.6511 (0.6283) Prec@1 79.688 (79.545) | ||
154 | +Test: [0/2] Time 1.666 (1.666) Loss 0.8708 (0.8708) Prec@1 62.500 (62.500) | ||
155 | + * epoch: 18 Prec@1 63.664 | ||
156 | + * epoch: 18 Prec@1 63.664 | ||
157 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
158 | +Epoch: [19][0/12] Time 1.918 (1.918) Loss 0.6416 (0.6416) Prec@1 79.297 (79.297) | ||
159 | +Epoch: [19][10/12] Time 0.158 (0.320) Loss 0.5400 (0.5515) Prec@1 83.203 (83.097) | ||
160 | +Test: [0/2] Time 1.694 (1.694) Loss 0.6527 (0.6527) Prec@1 81.250 (81.250) | ||
161 | + * epoch: 19 Prec@1 81.081 | ||
162 | + * epoch: 19 Prec@1 81.081 | ||
163 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
164 | +Epoch: [20][0/12] Time 2.206 (2.206) Loss 0.5358 (0.5358) Prec@1 81.641 (81.641) | ||
165 | +Epoch: [20][10/12] Time 0.160 (0.345) Loss 0.6041 (0.5729) Prec@1 77.734 (81.428) | ||
166 | +Test: [0/2] Time 1.720 (1.720) Loss 0.5669 (0.5669) Prec@1 83.594 (83.594) | ||
167 | + * epoch: 20 Prec@1 81.682 | ||
168 | + * epoch: 20 Prec@1 81.682 | ||
169 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
170 | +Epoch: [21][0/12] Time 2.203 (2.203) Loss 0.4921 (0.4921) Prec@1 82.422 (82.422) | ||
171 | +Epoch: [21][10/12] Time 0.158 (0.345) Loss 0.4483 (0.5416) Prec@1 84.375 (82.919) | ||
172 | +Test: [0/2] Time 1.706 (1.706) Loss 0.7614 (0.7614) Prec@1 78.516 (78.516) | ||
173 | + * epoch: 21 Prec@1 78.679 | ||
174 | + * epoch: 21 Prec@1 78.679 | ||
175 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
176 | +Epoch: [22][0/12] Time 1.921 (1.921) Loss 0.6461 (0.6461) Prec@1 81.250 (81.250) | ||
177 | +Epoch: [22][10/12] Time 0.158 (0.319) Loss 0.5921 (0.5490) Prec@1 82.031 (82.919) | ||
178 | +Test: [0/2] Time 1.709 (1.709) Loss 0.6422 (0.6422) Prec@1 82.812 (82.812) | ||
179 | + * epoch: 22 Prec@1 83.784 | ||
180 | + * epoch: 22 Prec@1 83.784 | ||
181 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
182 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
183 | +Epoch: [23][0/12] Time 1.939 (1.939) Loss 0.4573 (0.4573) Prec@1 87.500 (87.500) | ||
184 | +Epoch: [23][10/12] Time 0.159 (0.331) Loss 0.5843 (0.5141) Prec@1 80.469 (84.624) | ||
185 | +Test: [0/2] Time 1.685 (1.685) Loss 0.9102 (0.9102) Prec@1 75.000 (75.000) | ||
186 | + * epoch: 23 Prec@1 76.577 | ||
187 | + * epoch: 23 Prec@1 76.577 | ||
188 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
189 | +Epoch: [24][0/12] Time 1.925 (1.925) Loss 0.7865 (0.7865) Prec@1 78.125 (78.125) | ||
190 | +Epoch: [24][10/12] Time 0.160 (0.319) Loss 0.5571 (0.6079) Prec@1 85.938 (80.859) | ||
191 | +Test: [0/2] Time 1.712 (1.712) Loss 0.5972 (0.5972) Prec@1 82.422 (82.422) | ||
192 | + * epoch: 24 Prec@1 82.583 | ||
193 | + * epoch: 24 Prec@1 82.583 | ||
194 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
195 | +Epoch: [25][0/12] Time 1.958 (1.958) Loss 0.5632 (0.5632) Prec@1 83.984 (83.984) | ||
196 | +Epoch: [25][10/12] Time 0.159 (0.322) Loss 0.3754 (0.5158) Prec@1 88.281 (83.700) | ||
197 | +Test: [0/2] Time 1.701 (1.701) Loss 0.8734 (0.8734) Prec@1 77.734 (77.734) | ||
198 | + * epoch: 25 Prec@1 78.378 | ||
199 | + * epoch: 25 Prec@1 78.378 | ||
200 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
201 | +Epoch: [26][0/12] Time 2.162 (2.162) Loss 0.6764 (0.6764) Prec@1 81.641 (81.641) | ||
202 | +Epoch: [26][10/12] Time 0.160 (0.342) Loss 0.5274 (0.5528) Prec@1 80.469 (82.209) | ||
203 | +Test: [0/2] Time 1.696 (1.696) Loss 0.6240 (0.6240) Prec@1 85.938 (85.938) | ||
204 | + * epoch: 26 Prec@1 84.084 | ||
205 | + * epoch: 26 Prec@1 84.084 | ||
206 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
207 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
208 | +Epoch: [27][0/12] Time 2.181 (2.181) Loss 0.5241 (0.5241) Prec@1 83.203 (83.203) | ||
209 | +Epoch: [27][10/12] Time 0.158 (0.343) Loss 0.6147 (0.5227) Prec@1 79.297 (83.736) | ||
210 | +Test: [0/2] Time 1.708 (1.708) Loss 0.7268 (0.7268) Prec@1 80.469 (80.469) | ||
211 | + * epoch: 27 Prec@1 79.279 | ||
212 | + * epoch: 27 Prec@1 79.279 | ||
213 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
214 | +Epoch: [28][0/12] Time 1.966 (1.966) Loss 0.4439 (0.4439) Prec@1 87.500 (87.500) | ||
215 | +Epoch: [28][10/12] Time 0.159 (0.323) Loss 0.5766 (0.5243) Prec@1 85.547 (82.955) | ||
216 | +Test: [0/2] Time 1.684 (1.684) Loss 0.6416 (0.6416) Prec@1 82.031 (82.031) | ||
217 | + * epoch: 28 Prec@1 82.583 | ||
218 | + * epoch: 28 Prec@1 82.583 | ||
219 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
220 | +Epoch: [29][0/12] Time 1.943 (1.943) Loss 0.5633 (0.5633) Prec@1 79.297 (79.297) | ||
221 | +Epoch: [29][10/12] Time 0.159 (0.321) Loss 0.4740 (0.4708) Prec@1 85.547 (84.730) | ||
222 | +Test: [0/2] Time 1.704 (1.704) Loss 0.6950 (0.6950) Prec@1 78.125 (78.125) | ||
223 | + * epoch: 29 Prec@1 78.378 | ||
224 | + * epoch: 29 Prec@1 78.378 | ||
225 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
226 | +Epoch: [30][0/12] Time 1.953 (1.953) Loss 0.6137 (0.6137) Prec@1 82.422 (82.422) | ||
227 | +Epoch: [30][10/12] Time 0.159 (0.322) Loss 0.3596 (0.5028) Prec@1 87.891 (84.553) | ||
228 | +Test: [0/2] Time 1.699 (1.699) Loss 0.6546 (0.6546) Prec@1 82.031 (82.031) | ||
229 | + * epoch: 30 Prec@1 83.483 | ||
230 | + * epoch: 30 Prec@1 83.483 | ||
231 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
232 | +Epoch: [31][0/12] Time 2.174 (2.174) Loss 0.4846 (0.4846) Prec@1 83.984 (83.984) | ||
233 | +Epoch: [31][10/12] Time 0.160 (0.343) Loss 0.4474 (0.4548) Prec@1 84.375 (85.050) | ||
234 | +Test: [0/2] Time 1.678 (1.678) Loss 0.6281 (0.6281) Prec@1 84.375 (84.375) | ||
235 | + * epoch: 31 Prec@1 84.084 | ||
236 | + * epoch: 31 Prec@1 84.084 | ||
237 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
238 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
239 | +Epoch: [32][0/12] Time 1.929 (1.929) Loss 0.4780 (0.4780) Prec@1 83.594 (83.594) | ||
240 | +Epoch: [32][10/12] Time 0.160 (0.320) Loss 0.4670 (0.5008) Prec@1 85.938 (83.842) | ||
241 | +Test: [0/2] Time 1.698 (1.698) Loss 0.5501 (0.5501) Prec@1 82.422 (82.422) | ||
242 | + * epoch: 32 Prec@1 82.583 | ||
243 | + * epoch: 32 Prec@1 82.583 | ||
244 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
245 | +Epoch: [33][0/12] Time 2.224 (2.224) Loss 0.4372 (0.4372) Prec@1 89.062 (89.062) | ||
246 | +Epoch: [33][10/12] Time 0.159 (0.347) Loss 0.4002 (0.4672) Prec@1 87.500 (85.369) | ||
247 | +Test: [0/2] Time 1.650 (1.650) Loss 0.9937 (0.9937) Prec@1 65.234 (65.234) | ||
248 | + * epoch: 33 Prec@1 65.165 | ||
249 | + * epoch: 33 Prec@1 65.165 | ||
250 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
251 | +Epoch: [34][0/12] Time 2.233 (2.233) Loss 0.5080 (0.5080) Prec@1 82.031 (82.031) | ||
252 | +Epoch: [34][10/12] Time 0.159 (0.347) Loss 0.4675 (0.4632) Prec@1 84.766 (84.908) | ||
253 | +Test: [0/2] Time 1.673 (1.673) Loss 0.6285 (0.6285) Prec@1 83.984 (83.984) | ||
254 | + * epoch: 34 Prec@1 84.084 | ||
255 | + * epoch: 34 Prec@1 84.084 | ||
256 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
257 | +Epoch: [35][0/12] Time 1.915 (1.915) Loss 0.4910 (0.4910) Prec@1 86.328 (86.328) | ||
258 | +Epoch: [35][10/12] Time 0.159 (0.320) Loss 0.3640 (0.4289) Prec@1 88.281 (86.222) | ||
259 | +Test: [0/2] Time 1.725 (1.725) Loss 0.9229 (0.9229) Prec@1 80.859 (80.859) | ||
260 | + * epoch: 35 Prec@1 80.180 | ||
261 | + * epoch: 35 Prec@1 80.180 | ||
262 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
263 | +Epoch: [36][0/12] Time 1.938 (1.938) Loss 0.5173 (0.5173) Prec@1 81.641 (81.641) | ||
264 | +Epoch: [36][10/12] Time 0.159 (0.321) Loss 0.5336 (0.4753) Prec@1 83.203 (84.553) | ||
265 | +Test: [0/2] Time 1.725 (1.725) Loss 0.5637 (0.5637) Prec@1 87.109 (87.109) | ||
266 | + * epoch: 36 Prec@1 86.186 | ||
267 | + * epoch: 36 Prec@1 86.186 | ||
268 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
269 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
270 | +Epoch: [37][0/12] Time 2.224 (2.224) Loss 0.3901 (0.3901) Prec@1 86.719 (86.719) | ||
271 | +Epoch: [37][10/12] Time 0.159 (0.347) Loss 0.4682 (0.4061) Prec@1 82.422 (86.648) | ||
272 | +Test: [0/2] Time 1.686 (1.686) Loss 0.9415 (0.9415) Prec@1 74.219 (74.219) | ||
273 | + * epoch: 37 Prec@1 74.474 | ||
274 | + * epoch: 37 Prec@1 74.474 | ||
275 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
276 | +Epoch: [38][0/12] Time 1.960 (1.960) Loss 0.4797 (0.4797) Prec@1 84.375 (84.375) | ||
277 | +Epoch: [38][10/12] Time 0.159 (0.324) Loss 0.4020 (0.4410) Prec@1 88.672 (85.653) | ||
278 | +Test: [0/2] Time 1.692 (1.692) Loss 0.4875 (0.4875) Prec@1 84.375 (84.375) | ||
279 | + * epoch: 38 Prec@1 83.483 | ||
280 | + * epoch: 38 Prec@1 83.483 | ||
281 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
282 | +Epoch: [39][0/12] Time 1.931 (1.931) Loss 0.4579 (0.4579) Prec@1 85.156 (85.156) | ||
283 | +Epoch: [39][10/12] Time 0.159 (0.330) Loss 0.3570 (0.3953) Prec@1 87.500 (86.754) | ||
284 | +Test: [0/2] Time 1.673 (1.673) Loss 0.6890 (0.6890) Prec@1 84.766 (84.766) | ||
285 | + * epoch: 39 Prec@1 83.784 | ||
286 | + * epoch: 39 Prec@1 83.784 | ||
287 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
288 | +Epoch: [40][0/12] Time 1.908 (1.908) Loss 0.5327 (0.5327) Prec@1 82.031 (82.031) | ||
289 | +Epoch: [40][10/12] Time 0.160 (0.320) Loss 0.3981 (0.4445) Prec@1 88.281 (85.724) | ||
290 | +Test: [0/2] Time 1.706 (1.706) Loss 0.4213 (0.4213) Prec@1 88.672 (88.672) | ||
291 | + * epoch: 40 Prec@1 87.387 | ||
292 | + * epoch: 40 Prec@1 87.387 | ||
293 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
294 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
295 | +Epoch: [41][0/12] Time 1.924 (1.924) Loss 0.4044 (0.4044) Prec@1 85.938 (85.938) | ||
296 | +Epoch: [41][10/12] Time 0.160 (0.318) Loss 0.4958 (0.3959) Prec@1 86.719 (86.861) | ||
297 | +Test: [0/2] Time 1.699 (1.699) Loss 0.7511 (0.7511) Prec@1 76.172 (76.172) | ||
298 | + * epoch: 41 Prec@1 74.174 | ||
299 | + * epoch: 41 Prec@1 74.174 | ||
300 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
301 | +Epoch: [42][0/12] Time 2.204 (2.204) Loss 0.4268 (0.4268) Prec@1 88.281 (88.281) | ||
302 | +Epoch: [42][10/12] Time 0.158 (0.345) Loss 0.4259 (0.4605) Prec@1 87.109 (84.979) | ||
303 | +Test: [0/2] Time 1.730 (1.730) Loss 0.6196 (0.6196) Prec@1 87.109 (87.109) | ||
304 | + * epoch: 42 Prec@1 87.387 | ||
305 | + * epoch: 42 Prec@1 87.387 | ||
306 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
307 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
308 | +Epoch: [43][0/12] Time 2.063 (2.063) Loss 0.4617 (0.4617) Prec@1 84.375 (84.375) | ||
309 | +Epoch: [43][10/12] Time 0.159 (0.332) Loss 0.4178 (0.3862) Prec@1 87.891 (87.287) | ||
310 | +Test: [0/2] Time 1.692 (1.692) Loss 0.7116 (0.7116) Prec@1 78.516 (78.516) | ||
311 | + * epoch: 43 Prec@1 78.378 | ||
312 | + * epoch: 43 Prec@1 78.378 | ||
313 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
314 | +Epoch: [44][0/12] Time 1.968 (1.968) Loss 0.6055 (0.6055) Prec@1 76.953 (76.953) | ||
315 | +Epoch: [44][10/12] Time 0.160 (0.331) Loss 0.4583 (0.4649) Prec@1 85.156 (83.487) | ||
316 | +Test: [0/2] Time 1.690 (1.690) Loss 0.4601 (0.4601) Prec@1 87.109 (87.109) | ||
317 | + * epoch: 44 Prec@1 86.787 | ||
318 | + * epoch: 44 Prec@1 86.787 | ||
319 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
320 | +Epoch: [45][0/12] Time 1.908 (1.908) Loss 0.4238 (0.4238) Prec@1 87.109 (87.109) | ||
321 | +Epoch: [45][10/12] Time 0.159 (0.318) Loss 0.5030 (0.3826) Prec@1 82.812 (87.074) | ||
322 | +Test: [0/2] Time 1.707 (1.707) Loss 0.7285 (0.7285) Prec@1 79.688 (79.688) | ||
323 | + * epoch: 45 Prec@1 80.781 | ||
324 | + * epoch: 45 Prec@1 80.781 | ||
325 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
326 | +Epoch: [46][0/12] Time 1.955 (1.955) Loss 0.5765 (0.5765) Prec@1 81.250 (81.250) | ||
327 | +Epoch: [46][10/12] Time 0.159 (0.321) Loss 0.3067 (0.4130) Prec@1 88.672 (86.009) | ||
328 | +Test: [0/2] Time 1.686 (1.686) Loss 0.4962 (0.4962) Prec@1 89.062 (89.062) | ||
329 | + * epoch: 46 Prec@1 88.288 | ||
330 | + * epoch: 46 Prec@1 88.288 | ||
331 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
332 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
333 | +Epoch: [47][0/12] Time 1.986 (1.986) Loss 0.3760 (0.3760) Prec@1 87.891 (87.891) | ||
334 | +Epoch: [47][10/12] Time 0.159 (0.335) Loss 0.3775 (0.3699) Prec@1 84.766 (87.216) | ||
335 | +Test: [0/2] Time 1.697 (1.697) Loss 0.5537 (0.5537) Prec@1 85.938 (85.938) | ||
336 | + * epoch: 47 Prec@1 85.886 | ||
337 | + * epoch: 47 Prec@1 85.886 | ||
338 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
339 | +Epoch: [48][0/12] Time 1.951 (1.951) Loss 0.3737 (0.3737) Prec@1 87.500 (87.500) | ||
340 | +Epoch: [48][10/12] Time 0.159 (0.322) Loss 0.3257 (0.3962) Prec@1 88.672 (87.145) | ||
341 | +Test: [0/2] Time 1.693 (1.693) Loss 0.5342 (0.5342) Prec@1 87.109 (87.109) | ||
342 | + * epoch: 48 Prec@1 85.586 | ||
343 | + * epoch: 48 Prec@1 85.586 | ||
344 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
345 | +Epoch: [49][0/12] Time 1.991 (1.991) Loss 0.3158 (0.3158) Prec@1 88.672 (88.672) | ||
346 | +Epoch: [49][10/12] Time 0.159 (0.326) Loss 0.4068 (0.3745) Prec@1 87.109 (87.571) | ||
347 | +Test: [0/2] Time 1.684 (1.684) Loss 0.5855 (0.5855) Prec@1 81.641 (81.641) | ||
348 | + * epoch: 49 Prec@1 80.781 | ||
349 | + * epoch: 49 Prec@1 80.781 | ||
350 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
351 | +Epoch: [50][0/12] Time 1.904 (1.904) Loss 0.6005 (0.6005) Prec@1 79.297 (79.297) | ||
352 | +Epoch: [50][10/12] Time 0.159 (0.319) Loss 0.3856 (0.3787) Prec@1 86.328 (87.464) | ||
353 | +Test: [0/2] Time 1.680 (1.680) Loss 0.6797 (0.6797) Prec@1 77.344 (77.344) | ||
354 | + * epoch: 50 Prec@1 78.979 | ||
355 | + * epoch: 50 Prec@1 78.979 | ||
356 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
357 | +Epoch: [51][0/12] Time 2.151 (2.151) Loss 0.3381 (0.3381) Prec@1 89.844 (89.844) | ||
358 | +Epoch: [51][10/12] Time 0.157 (0.340) Loss 0.3905 (0.3525) Prec@1 83.594 (88.352) | ||
359 | +Test: [0/2] Time 1.705 (1.705) Loss 0.5561 (0.5561) Prec@1 86.328 (86.328) | ||
360 | + * epoch: 51 Prec@1 85.886 | ||
361 | + * epoch: 51 Prec@1 85.886 | ||
362 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
363 | +Epoch: [52][0/12] Time 1.977 (1.977) Loss 0.3419 (0.3419) Prec@1 88.281 (88.281) | ||
364 | +Epoch: [52][10/12] Time 0.159 (0.325) Loss 0.3898 (0.3460) Prec@1 88.281 (88.565) | ||
365 | +Test: [0/2] Time 1.704 (1.704) Loss 0.4699 (0.4699) Prec@1 86.328 (86.328) | ||
366 | + * epoch: 52 Prec@1 85.285 | ||
367 | + * epoch: 52 Prec@1 85.285 | ||
368 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
369 | +Epoch: [53][0/12] Time 1.954 (1.954) Loss 0.2477 (0.2477) Prec@1 88.672 (88.672) | ||
370 | +Epoch: [53][10/12] Time 0.159 (0.330) Loss 0.3368 (0.3348) Prec@1 88.672 (88.920) | ||
371 | +Test: [0/2] Time 1.672 (1.672) Loss 0.9721 (0.9721) Prec@1 60.547 (60.547) | ||
372 | + * epoch: 53 Prec@1 61.261 | ||
373 | + * epoch: 53 Prec@1 61.261 | ||
374 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
375 | +Epoch: [54][0/12] Time 2.249 (2.249) Loss 0.4255 (0.4255) Prec@1 82.031 (82.031) | ||
376 | +Epoch: [54][10/12] Time 0.160 (0.349) Loss 0.3790 (0.3706) Prec@1 85.547 (86.506) | ||
377 | +Test: [0/2] Time 1.664 (1.664) Loss 0.4914 (0.4914) Prec@1 88.281 (88.281) | ||
378 | + * epoch: 54 Prec@1 88.889 | ||
379 | + * epoch: 54 Prec@1 88.889 | ||
380 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
381 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
382 | +Epoch: [55][0/12] Time 1.919 (1.919) Loss 0.3694 (0.3694) Prec@1 85.547 (85.547) | ||
383 | +Epoch: [55][10/12] Time 0.159 (0.319) Loss 0.3774 (0.3204) Prec@1 88.672 (89.240) | ||
384 | +Test: [0/2] Time 1.689 (1.689) Loss 0.7353 (0.7353) Prec@1 80.078 (80.078) | ||
385 | + * epoch: 55 Prec@1 79.880 | ||
386 | + * epoch: 55 Prec@1 79.880 | ||
387 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
388 | +Epoch: [56][0/12] Time 1.947 (1.947) Loss 0.4603 (0.4603) Prec@1 83.594 (83.594) | ||
389 | +Epoch: [56][10/12] Time 0.159 (0.322) Loss 0.3666 (0.3867) Prec@1 87.891 (86.932) | ||
390 | +Test: [0/2] Time 1.722 (1.722) Loss 0.4614 (0.4614) Prec@1 86.719 (86.719) | ||
391 | + * epoch: 56 Prec@1 86.486 | ||
392 | + * epoch: 56 Prec@1 86.486 | ||
393 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
394 | +Epoch: [57][0/12] Time 1.980 (1.980) Loss 0.3089 (0.3089) Prec@1 89.844 (89.844) | ||
395 | +Epoch: [57][10/12] Time 0.160 (0.323) Loss 0.2758 (0.3167) Prec@1 90.234 (89.027) | ||
396 | +Test: [0/2] Time 1.713 (1.713) Loss 0.7926 (0.7926) Prec@1 69.141 (69.141) | ||
397 | + * epoch: 57 Prec@1 68.769 | ||
398 | + * epoch: 57 Prec@1 68.769 | ||
399 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
400 | +Epoch: [58][0/12] Time 1.897 (1.897) Loss 0.3536 (0.3536) Prec@1 86.328 (86.328) | ||
401 | +Epoch: [58][10/12] Time 0.160 (0.318) Loss 0.3304 (0.3711) Prec@1 91.406 (87.109) | ||
402 | +Test: [0/2] Time 1.722 (1.722) Loss 0.4612 (0.4612) Prec@1 89.062 (89.062) | ||
403 | + * epoch: 58 Prec@1 89.790 | ||
404 | + * epoch: 58 Prec@1 89.790 | ||
405 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
406 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
407 | +Epoch: [59][0/12] Time 2.226 (2.226) Loss 0.2936 (0.2936) Prec@1 91.406 (91.406) | ||
408 | +Epoch: [59][10/12] Time 0.159 (0.348) Loss 0.3097 (0.3106) Prec@1 87.500 (89.560) | ||
409 | +Test: [0/2] Time 1.691 (1.691) Loss 0.5900 (0.5900) Prec@1 83.984 (83.984) | ||
410 | + * epoch: 59 Prec@1 85.285 | ||
411 | + * epoch: 59 Prec@1 85.285 | ||
412 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
413 | +Epoch: [60][0/12] Time 2.012 (2.012) Loss 0.2896 (0.2896) Prec@1 90.234 (90.234) | ||
414 | +Epoch: [60][10/12] Time 0.158 (0.326) Loss 0.3392 (0.3331) Prec@1 87.891 (88.246) | ||
415 | +Test: [0/2] Time 1.725 (1.725) Loss 0.5262 (0.5262) Prec@1 89.453 (89.453) | ||
416 | + * epoch: 60 Prec@1 88.889 | ||
417 | + * epoch: 60 Prec@1 88.889 | ||
418 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
419 | +Epoch: [61][0/12] Time 1.936 (1.936) Loss 0.3321 (0.3321) Prec@1 88.281 (88.281) | ||
420 | +Epoch: [61][10/12] Time 0.159 (0.320) Loss 0.3931 (0.3128) Prec@1 87.891 (89.347) | ||
421 | +Test: [0/2] Time 1.672 (1.672) Loss 0.8900 (0.8900) Prec@1 57.812 (57.812) | ||
422 | + * epoch: 61 Prec@1 58.258 | ||
423 | + * epoch: 61 Prec@1 58.258 | ||
424 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
425 | +Epoch: [62][0/12] Time 1.955 (1.955) Loss 0.3191 (0.3191) Prec@1 89.844 (89.844) | ||
426 | +Epoch: [62][10/12] Time 0.154 (0.322) Loss 0.3570 (0.3385) Prec@1 88.281 (88.636) | ||
427 | +Test: [0/2] Time 1.709 (1.709) Loss 0.4341 (0.4341) Prec@1 86.719 (86.719) | ||
428 | + * epoch: 62 Prec@1 86.486 | ||
429 | + * epoch: 62 Prec@1 86.486 | ||
430 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
431 | +Epoch: [63][0/12] Time 1.930 (1.930) Loss 0.2078 (0.2078) Prec@1 92.188 (92.188) | ||
432 | +Epoch: [63][10/12] Time 0.159 (0.320) Loss 0.3926 (0.2808) Prec@1 88.281 (90.376) | ||
433 | +Test: [0/2] Time 1.681 (1.681) Loss 0.5782 (0.5782) Prec@1 71.484 (71.484) | ||
434 | + * epoch: 63 Prec@1 70.270 | ||
435 | + * epoch: 63 Prec@1 70.270 | ||
436 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
437 | +Epoch: [64][0/12] Time 2.252 (2.252) Loss 0.3744 (0.3744) Prec@1 87.891 (87.891) | ||
438 | +Epoch: [64][10/12] Time 0.158 (0.349) Loss 0.3298 (0.3217) Prec@1 88.672 (89.276) | ||
439 | +Test: [0/2] Time 1.717 (1.717) Loss 0.4983 (0.4983) Prec@1 89.062 (89.062) | ||
440 | + * epoch: 64 Prec@1 89.489 | ||
441 | + * epoch: 64 Prec@1 89.489 | ||
442 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
443 | +Epoch: [65][0/12] Time 1.913 (1.913) Loss 0.3845 (0.3845) Prec@1 87.891 (87.891) | ||
444 | +Epoch: [65][10/12] Time 0.161 (0.332) Loss 0.2949 (0.3003) Prec@1 89.062 (89.950) | ||
445 | +Test: [0/2] Time 1.693 (1.693) Loss 0.5655 (0.5655) Prec@1 86.328 (86.328) | ||
446 | + * epoch: 65 Prec@1 87.087 | ||
447 | + * epoch: 65 Prec@1 87.087 | ||
448 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
449 | +Epoch: [66][0/12] Time 1.949 (1.949) Loss 0.4444 (0.4444) Prec@1 87.109 (87.109) | ||
450 | +Epoch: [66][10/12] Time 0.159 (0.322) Loss 0.3399 (0.3130) Prec@1 89.062 (89.098) | ||
451 | +Test: [0/2] Time 1.703 (1.703) Loss 0.5085 (0.5085) Prec@1 89.844 (89.844) | ||
452 | + * epoch: 66 Prec@1 87.688 | ||
453 | + * epoch: 66 Prec@1 87.688 | ||
454 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
455 | +Epoch: [67][0/12] Time 1.946 (1.946) Loss 0.2375 (0.2375) Prec@1 91.016 (91.016) | ||
456 | +Epoch: [67][10/12] Time 0.159 (0.328) Loss 0.3387 (0.2769) Prec@1 85.547 (90.128) | ||
457 | +Test: [0/2] Time 1.703 (1.703) Loss 0.5086 (0.5086) Prec@1 88.281 (88.281) | ||
458 | + * epoch: 67 Prec@1 87.387 | ||
459 | + * epoch: 67 Prec@1 87.387 | ||
460 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
461 | +Epoch: [68][0/12] Time 1.972 (1.972) Loss 0.3379 (0.3379) Prec@1 90.234 (90.234) | ||
462 | +Epoch: [68][10/12] Time 0.159 (0.324) Loss 0.2454 (0.3145) Prec@1 91.406 (88.672) | ||
463 | +Test: [0/2] Time 1.719 (1.719) Loss 0.6045 (0.6045) Prec@1 87.500 (87.500) | ||
464 | + * epoch: 68 Prec@1 88.589 | ||
465 | + * epoch: 68 Prec@1 88.589 | ||
466 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
467 | +Epoch: [69][0/12] Time 1.947 (1.947) Loss 0.2363 (0.2363) Prec@1 90.625 (90.625) | ||
468 | +Epoch: [69][10/12] Time 0.159 (0.321) Loss 0.2486 (0.2593) Prec@1 90.234 (90.874) | ||
469 | +Test: [0/2] Time 1.710 (1.710) Loss 0.5298 (0.5298) Prec@1 80.078 (80.078) | ||
470 | + * epoch: 69 Prec@1 81.381 | ||
471 | + * epoch: 69 Prec@1 81.381 | ||
472 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
473 | +Epoch: [70][0/12] Time 1.949 (1.949) Loss 0.2916 (0.2916) Prec@1 89.453 (89.453) | ||
474 | +Epoch: [70][10/12] Time 0.158 (0.328) Loss 0.2786 (0.2966) Prec@1 89.453 (89.737) | ||
475 | +Test: [0/2] Time 1.683 (1.683) Loss 0.4976 (0.4976) Prec@1 87.500 (87.500) | ||
476 | + * epoch: 70 Prec@1 88.288 | ||
477 | + * epoch: 70 Prec@1 88.288 | ||
478 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
479 | +Epoch: [71][0/12] Time 1.928 (1.928) Loss 0.1949 (0.1949) Prec@1 94.141 (94.141) | ||
480 | +Epoch: [71][10/12] Time 0.159 (0.330) Loss 0.1884 (0.2445) Prec@1 93.359 (90.696) | ||
481 | +Test: [0/2] Time 1.684 (1.684) Loss 0.5251 (0.5251) Prec@1 82.812 (82.812) | ||
482 | + * epoch: 71 Prec@1 81.081 | ||
483 | + * epoch: 71 Prec@1 81.081 | ||
484 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
485 | +Epoch: [72][0/12] Time 1.930 (1.930) Loss 0.3282 (0.3282) Prec@1 87.500 (87.500) | ||
486 | +Epoch: [72][10/12] Time 0.159 (0.320) Loss 0.3842 (0.3169) Prec@1 86.719 (89.666) | ||
487 | +Test: [0/2] Time 1.709 (1.709) Loss 0.6996 (0.6996) Prec@1 89.453 (89.453) | ||
488 | + * epoch: 72 Prec@1 90.390 | ||
489 | + * epoch: 72 Prec@1 90.390 | ||
490 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
491 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
492 | +Epoch: [73][0/12] Time 2.187 (2.187) Loss 0.3168 (0.3168) Prec@1 87.500 (87.500) | ||
493 | +Epoch: [73][10/12] Time 0.159 (0.343) Loss 0.3492 (0.2848) Prec@1 85.938 (89.986) | ||
494 | +Test: [0/2] Time 1.708 (1.708) Loss 0.9264 (0.9264) Prec@1 85.547 (85.547) | ||
495 | + * epoch: 73 Prec@1 86.787 | ||
496 | + * epoch: 73 Prec@1 86.787 | ||
497 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
498 | +Epoch: [74][0/12] Time 1.913 (1.913) Loss 0.3152 (0.3152) Prec@1 88.281 (88.281) | ||
499 | +Epoch: [74][10/12] Time 0.159 (0.319) Loss 0.2675 (0.2727) Prec@1 92.578 (90.518) | ||
500 | +Test: [0/2] Time 1.720 (1.720) Loss 0.5490 (0.5490) Prec@1 86.328 (86.328) | ||
501 | + * epoch: 74 Prec@1 86.186 | ||
502 | + * epoch: 74 Prec@1 86.186 | ||
503 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
504 | +Epoch: [75][0/12] Time 1.922 (1.922) Loss 0.2301 (0.2301) Prec@1 90.625 (90.625) | ||
505 | +Epoch: [75][10/12] Time 0.159 (0.320) Loss 0.3265 (0.2525) Prec@1 87.109 (91.229) | ||
506 | +Test: [0/2] Time 1.699 (1.699) Loss 0.6857 (0.6857) Prec@1 82.812 (82.812) | ||
507 | + * epoch: 75 Prec@1 82.282 | ||
508 | + * epoch: 75 Prec@1 82.282 | ||
509 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
510 | +Epoch: [76][0/12] Time 2.240 (2.240) Loss 0.2848 (0.2848) Prec@1 87.500 (87.500) | ||
511 | +Epoch: [76][10/12] Time 0.160 (0.348) Loss 0.3408 (0.3049) Prec@1 87.500 (89.560) | ||
512 | +Test: [0/2] Time 1.707 (1.707) Loss 0.5952 (0.5952) Prec@1 81.641 (81.641) | ||
513 | + * epoch: 76 Prec@1 81.682 | ||
514 | + * epoch: 76 Prec@1 81.682 | ||
515 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
516 | +Epoch: [77][0/12] Time 1.904 (1.904) Loss 0.2558 (0.2558) Prec@1 92.578 (92.578) | ||
517 | +Epoch: [77][10/12] Time 0.159 (0.324) Loss 0.3256 (0.2529) Prec@1 87.500 (91.229) | ||
518 | +Test: [0/2] Time 1.708 (1.708) Loss 0.5739 (0.5739) Prec@1 85.156 (85.156) | ||
519 | + * epoch: 77 Prec@1 82.883 | ||
520 | + * epoch: 77 Prec@1 82.883 | ||
521 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
522 | +Epoch: [78][0/12] Time 2.032 (2.032) Loss 0.2934 (0.2934) Prec@1 89.844 (89.844) | ||
523 | +Epoch: [78][10/12] Time 0.159 (0.330) Loss 0.2887 (0.2670) Prec@1 92.578 (91.300) | ||
524 | +Test: [0/2] Time 1.688 (1.688) Loss 0.5590 (0.5590) Prec@1 85.156 (85.156) | ||
525 | + * epoch: 78 Prec@1 86.186 | ||
526 | + * epoch: 78 Prec@1 86.186 | ||
527 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
528 | +Epoch: [79][0/12] Time 1.945 (1.945) Loss 0.2431 (0.2431) Prec@1 91.797 (91.797) | ||
529 | +Epoch: [79][10/12] Time 0.160 (0.332) Loss 0.3305 (0.2164) Prec@1 87.109 (92.543) | ||
530 | +Test: [0/2] Time 1.675 (1.675) Loss 0.7119 (0.7119) Prec@1 77.344 (77.344) | ||
531 | + * epoch: 79 Prec@1 76.877 | ||
532 | + * epoch: 79 Prec@1 76.877 | ||
533 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
534 | +Epoch: [80][0/12] Time 2.189 (2.189) Loss 0.2332 (0.2332) Prec@1 92.969 (92.969) | ||
535 | +Epoch: [80][10/12] Time 0.159 (0.343) Loss 0.2309 (0.2770) Prec@1 93.359 (91.335) | ||
536 | +Test: [0/2] Time 1.680 (1.680) Loss 0.3864 (0.3864) Prec@1 90.234 (90.234) | ||
537 | + * epoch: 80 Prec@1 87.688 | ||
538 | + * epoch: 80 Prec@1 87.688 | ||
539 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
540 | +Epoch: [81][0/12] Time 2.239 (2.239) Loss 0.2767 (0.2767) Prec@1 92.578 (92.578) | ||
541 | +Epoch: [81][10/12] Time 0.160 (0.348) Loss 0.3311 (0.2484) Prec@1 86.328 (91.335) | ||
542 | +Test: [0/2] Time 1.720 (1.720) Loss 0.6348 (0.6348) Prec@1 76.562 (76.562) | ||
543 | + * epoch: 81 Prec@1 76.276 | ||
544 | + * epoch: 81 Prec@1 76.276 | ||
545 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
546 | +Epoch: [82][0/12] Time 2.256 (2.256) Loss 0.1899 (0.1899) Prec@1 92.969 (92.969) | ||
547 | +Epoch: [82][10/12] Time 0.159 (0.349) Loss 0.2917 (0.2652) Prec@1 90.625 (91.442) | ||
548 | +Test: [0/2] Time 1.689 (1.689) Loss 0.5168 (0.5168) Prec@1 85.156 (85.156) | ||
549 | + * epoch: 82 Prec@1 85.886 | ||
550 | + * epoch: 82 Prec@1 85.886 | ||
551 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
552 | +Epoch: [83][0/12] Time 2.051 (2.051) Loss 0.1982 (0.1982) Prec@1 92.188 (92.188) | ||
553 | +Epoch: [83][10/12] Time 0.159 (0.332) Loss 0.2491 (0.2078) Prec@1 89.844 (92.543) | ||
554 | +Test: [0/2] Time 1.682 (1.682) Loss 0.6296 (0.6296) Prec@1 80.078 (80.078) | ||
555 | + * epoch: 83 Prec@1 77.177 | ||
556 | + * epoch: 83 Prec@1 77.177 | ||
557 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
558 | +Epoch: [84][0/12] Time 1.912 (1.912) Loss 0.1927 (0.1927) Prec@1 93.359 (93.359) | ||
559 | +Epoch: [84][10/12] Time 0.157 (0.318) Loss 0.1888 (0.2599) Prec@1 93.359 (91.406) | ||
560 | +Test: [0/2] Time 1.707 (1.707) Loss 0.5512 (0.5512) Prec@1 88.672 (88.672) | ||
561 | + * epoch: 84 Prec@1 89.790 | ||
562 | + * epoch: 84 Prec@1 89.790 | ||
563 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
564 | +Epoch: [85][0/12] Time 1.909 (1.909) Loss 0.1752 (0.1752) Prec@1 93.750 (93.750) | ||
565 | +Epoch: [85][10/12] Time 0.161 (0.319) Loss 0.2747 (0.2133) Prec@1 91.797 (92.614) | ||
566 | +Test: [0/2] Time 1.704 (1.704) Loss 0.5671 (0.5671) Prec@1 86.328 (86.328) | ||
567 | + * epoch: 85 Prec@1 87.387 | ||
568 | + * epoch: 85 Prec@1 87.387 | ||
569 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
570 | +Epoch: [86][0/12] Time 2.240 (2.240) Loss 0.2070 (0.2070) Prec@1 89.844 (89.844) | ||
571 | +Epoch: [86][10/12] Time 0.160 (0.348) Loss 0.2832 (0.2450) Prec@1 91.797 (91.513) | ||
572 | +Test: [0/2] Time 1.709 (1.709) Loss 0.5368 (0.5368) Prec@1 86.719 (86.719) | ||
573 | + * epoch: 86 Prec@1 85.586 | ||
574 | + * epoch: 86 Prec@1 85.586 | ||
575 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
576 | +Epoch: [87][0/12] Time 1.953 (1.953) Loss 0.1469 (0.1469) Prec@1 96.094 (96.094) | ||
577 | +Epoch: [87][10/12] Time 0.161 (0.329) Loss 0.1976 (0.2014) Prec@1 92.969 (93.466) | ||
578 | +Test: [0/2] Time 1.704 (1.704) Loss 0.6268 (0.6268) Prec@1 86.328 (86.328) | ||
579 | + * epoch: 87 Prec@1 88.288 | ||
580 | + * epoch: 87 Prec@1 88.288 | ||
581 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
582 | +Epoch: [88][0/12] Time 1.976 (1.976) Loss 0.1892 (0.1892) Prec@1 92.969 (92.969) | ||
583 | +Epoch: [88][10/12] Time 0.159 (0.323) Loss 0.1663 (0.2332) Prec@1 93.359 (91.442) | ||
584 | +Test: [0/2] Time 1.725 (1.725) Loss 0.4768 (0.4768) Prec@1 89.844 (89.844) | ||
585 | + * epoch: 88 Prec@1 89.489 | ||
586 | + * epoch: 88 Prec@1 89.489 | ||
587 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
588 | +Epoch: [89][0/12] Time 2.208 (2.208) Loss 0.1286 (0.1286) Prec@1 94.141 (94.141) | ||
589 | +Epoch: [89][10/12] Time 0.159 (0.345) Loss 0.1622 (0.1895) Prec@1 93.750 (92.862) | ||
590 | +Test: [0/2] Time 1.670 (1.670) Loss 0.7504 (0.7504) Prec@1 86.719 (86.719) | ||
591 | + * epoch: 89 Prec@1 88.288 | ||
592 | + * epoch: 89 Prec@1 88.288 | ||
593 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
594 | +Epoch: [90][0/12] Time 1.919 (1.919) Loss 0.1749 (0.1749) Prec@1 94.531 (94.531) | ||
595 | +Epoch: [90][10/12] Time 0.160 (0.319) Loss 0.3030 (0.2244) Prec@1 90.234 (92.472) | ||
596 | +Test: [0/2] Time 1.703 (1.703) Loss 0.6520 (0.6520) Prec@1 87.500 (87.500) | ||
597 | + * epoch: 90 Prec@1 89.489 | ||
598 | + * epoch: 90 Prec@1 89.489 | ||
599 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
600 | +Epoch: [91][0/12] Time 2.019 (2.019) Loss 0.1967 (0.1967) Prec@1 93.750 (93.750) | ||
601 | +Epoch: [91][10/12] Time 0.160 (0.328) Loss 0.2092 (0.1974) Prec@1 91.406 (93.146) | ||
602 | +Test: [0/2] Time 1.717 (1.717) Loss 0.5337 (0.5337) Prec@1 89.453 (89.453) | ||
603 | + * epoch: 91 Prec@1 89.489 | ||
604 | + * epoch: 91 Prec@1 89.489 | ||
605 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
606 | +Epoch: [92][0/12] Time 1.908 (1.908) Loss 0.1796 (0.1796) Prec@1 93.750 (93.750) | ||
607 | +Epoch: [92][10/12] Time 0.160 (0.320) Loss 0.2263 (0.2480) Prec@1 93.359 (91.868) | ||
608 | +Test: [0/2] Time 1.715 (1.715) Loss 0.4933 (0.4933) Prec@1 88.672 (88.672) | ||
609 | + * epoch: 92 Prec@1 89.189 | ||
610 | + * epoch: 92 Prec@1 89.189 | ||
611 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
612 | +Epoch: [93][0/12] Time 1.934 (1.934) Loss 0.1817 (0.1817) Prec@1 93.750 (93.750) | ||
613 | +Epoch: [93][10/12] Time 0.158 (0.320) Loss 0.2488 (0.2133) Prec@1 90.625 (92.330) | ||
614 | +Test: [0/2] Time 1.699 (1.699) Loss 0.7086 (0.7086) Prec@1 80.469 (80.469) | ||
615 | + * epoch: 93 Prec@1 79.279 | ||
616 | + * epoch: 93 Prec@1 79.279 | ||
617 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
618 | +Epoch: [94][0/12] Time 1.955 (1.955) Loss 0.2422 (0.2422) Prec@1 89.453 (89.453) | ||
619 | +Epoch: [94][10/12] Time 0.159 (0.322) Loss 0.1780 (0.2533) Prec@1 93.359 (90.518) | ||
620 | +Test: [0/2] Time 1.699 (1.699) Loss 0.6035 (0.6035) Prec@1 89.062 (89.062) | ||
621 | + * epoch: 94 Prec@1 89.189 | ||
622 | + * epoch: 94 Prec@1 89.189 | ||
623 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
624 | +Epoch: [95][0/12] Time 1.975 (1.975) Loss 0.2582 (0.2582) Prec@1 90.234 (90.234) | ||
625 | +Epoch: [95][10/12] Time 0.159 (0.323) Loss 0.2944 (0.2084) Prec@1 92.188 (92.791) | ||
626 | +Test: [0/2] Time 1.730 (1.730) Loss 1.0666 (1.0666) Prec@1 67.188 (67.188) | ||
627 | + * epoch: 95 Prec@1 66.967 | ||
628 | + * epoch: 95 Prec@1 66.967 | ||
629 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
630 | +Epoch: [96][0/12] Time 1.966 (1.966) Loss 0.1643 (0.1643) Prec@1 94.141 (94.141) | ||
631 | +Epoch: [96][10/12] Time 0.160 (0.331) Loss 0.2444 (0.2336) Prec@1 91.016 (92.294) | ||
632 | +Test: [0/2] Time 1.694 (1.694) Loss 0.5861 (0.5861) Prec@1 86.328 (86.328) | ||
633 | + * epoch: 96 Prec@1 86.486 | ||
634 | + * epoch: 96 Prec@1 86.486 | ||
635 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
636 | +Epoch: [97][0/12] Time 1.914 (1.914) Loss 0.1659 (0.1659) Prec@1 94.531 (94.531) | ||
637 | +Epoch: [97][10/12] Time 0.160 (0.330) Loss 0.2504 (0.1904) Prec@1 92.578 (93.466) | ||
638 | +Test: [0/2] Time 1.678 (1.678) Loss 0.6894 (0.6894) Prec@1 78.906 (78.906) | ||
639 | + * epoch: 97 Prec@1 80.480 | ||
640 | + * epoch: 97 Prec@1 80.480 | ||
641 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
642 | +Epoch: [98][0/12] Time 1.902 (1.902) Loss 0.2155 (0.2155) Prec@1 91.406 (91.406) | ||
643 | +Epoch: [98][10/12] Time 0.159 (0.317) Loss 0.2941 (0.2129) Prec@1 92.578 (92.401) | ||
644 | +Test: [0/2] Time 1.687 (1.687) Loss 0.3753 (0.3753) Prec@1 94.922 (94.922) | ||
645 | + * epoch: 98 Prec@1 93.093 | ||
646 | + * epoch: 98 Prec@1 93.093 | ||
647 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
648 | +New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar | ||
649 | +Epoch: [99][0/12] Time 1.923 (1.923) Loss 0.1940 (0.1940) Prec@1 95.312 (95.312) | ||
650 | +Epoch: [99][10/12] Time 0.160 (0.321) Loss 0.1865 (0.1930) Prec@1 93.750 (93.537) | ||
651 | +Test: [0/2] Time 1.672 (1.672) Loss 0.5767 (0.5767) Prec@1 83.594 (83.594) | ||
652 | + * epoch: 99 Prec@1 83.483 | ||
653 | + * epoch: 99 Prec@1 83.483 | ||
654 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
655 | +Epoch: [100][0/12] Time 1.946 (1.946) Loss 0.3028 (0.3028) Prec@1 91.016 (91.016) | ||
656 | +Epoch: [100][10/12] Time 0.159 (0.323) Loss 0.2235 (0.2103) Prec@1 91.406 (92.969) | ||
657 | +Test: [0/2] Time 1.704 (1.704) Loss 0.5625 (0.5625) Prec@1 89.844 (89.844) | ||
658 | + * epoch: 100 Prec@1 88.889 | ||
659 | + * epoch: 100 Prec@1 88.889 | ||
660 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
661 | +Epoch: [101][0/12] Time 1.920 (1.920) Loss 0.3380 (0.3380) Prec@1 89.844 (89.844) | ||
662 | +Epoch: [101][10/12] Time 0.159 (0.320) Loss 0.1733 (0.1909) Prec@1 92.188 (93.679) | ||
663 | +Test: [0/2] Time 1.678 (1.678) Loss 0.6445 (0.6445) Prec@1 89.062 (89.062) | ||
664 | + * epoch: 101 Prec@1 88.589 | ||
665 | + * epoch: 101 Prec@1 88.589 | ||
666 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
667 | +Epoch: [102][0/12] Time 1.895 (1.895) Loss 0.2093 (0.2093) Prec@1 92.969 (92.969) | ||
668 | +Epoch: [102][10/12] Time 0.159 (0.317) Loss 0.2647 (0.2172) Prec@1 89.844 (92.791) | ||
669 | +Test: [0/2] Time 1.691 (1.691) Loss 0.4537 (0.4537) Prec@1 89.844 (89.844) | ||
670 | + * epoch: 102 Prec@1 90.390 | ||
671 | + * epoch: 102 Prec@1 90.390 | ||
672 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
673 | +Epoch: [103][0/12] Time 1.975 (1.975) Loss 0.1979 (0.1979) Prec@1 92.969 (92.969) | ||
674 | +Epoch: [103][10/12] Time 0.159 (0.324) Loss 0.2140 (0.1836) Prec@1 89.844 (93.217) | ||
675 | +Test: [0/2] Time 1.706 (1.706) Loss 0.7860 (0.7860) Prec@1 89.062 (89.062) | ||
676 | + * epoch: 103 Prec@1 90.090 | ||
677 | + * epoch: 103 Prec@1 90.090 | ||
678 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
679 | +Epoch: [104][0/12] Time 2.022 (2.022) Loss 0.2810 (0.2810) Prec@1 92.578 (92.578) | ||
680 | +Epoch: [104][10/12] Time 0.159 (0.330) Loss 0.2480 (0.2219) Prec@1 91.797 (92.081) | ||
681 | +Test: [0/2] Time 1.716 (1.716) Loss 0.6215 (0.6215) Prec@1 90.625 (90.625) | ||
682 | + * epoch: 104 Prec@1 91.592 | ||
683 | + * epoch: 104 Prec@1 91.592 | ||
684 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
685 | +Epoch: [105][0/12] Time 1.906 (1.906) Loss 0.1642 (0.1642) Prec@1 95.312 (95.312) | ||
686 | +Epoch: [105][10/12] Time 0.159 (0.319) Loss 0.1640 (0.1851) Prec@1 93.750 (93.928) | ||
687 | +Test: [0/2] Time 1.708 (1.708) Loss 0.5004 (0.5004) Prec@1 90.625 (90.625) | ||
688 | + * epoch: 105 Prec@1 90.390 | ||
689 | + * epoch: 105 Prec@1 90.390 | ||
690 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
691 | +Epoch: [106][0/12] Time 1.909 (1.909) Loss 0.1803 (0.1803) Prec@1 90.234 (90.234) | ||
692 | +Epoch: [106][10/12] Time 0.158 (0.319) Loss 0.1994 (0.2141) Prec@1 91.797 (92.223) | ||
693 | +Test: [0/2] Time 1.688 (1.688) Loss 0.4920 (0.4920) Prec@1 82.031 (82.031) | ||
694 | + * epoch: 106 Prec@1 79.580 | ||
695 | + * epoch: 106 Prec@1 79.580 | ||
696 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
697 | +Epoch: [107][0/12] Time 1.971 (1.971) Loss 0.1579 (0.1579) Prec@1 92.969 (92.969) | ||
698 | +Epoch: [107][10/12] Time 0.160 (0.330) Loss 0.1896 (0.1695) Prec@1 93.359 (93.786) | ||
699 | +Test: [0/2] Time 1.693 (1.693) Loss 0.6627 (0.6627) Prec@1 85.156 (85.156) | ||
700 | + * epoch: 107 Prec@1 85.285 | ||
701 | + * epoch: 107 Prec@1 85.285 | ||
702 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
703 | +Epoch: [108][0/12] Time 1.926 (1.926) Loss 0.2721 (0.2721) Prec@1 90.234 (90.234) | ||
704 | +Epoch: [108][10/12] Time 0.159 (0.334) Loss 0.1391 (0.1956) Prec@1 94.531 (93.395) | ||
705 | +Test: [0/2] Time 1.693 (1.693) Loss 0.6169 (0.6169) Prec@1 87.891 (87.891) | ||
706 | + * epoch: 108 Prec@1 88.889 | ||
707 | + * epoch: 108 Prec@1 88.889 | ||
708 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
709 | +Epoch: [109][0/12] Time 1.932 (1.932) Loss 0.1255 (0.1255) Prec@1 96.094 (96.094) | ||
710 | +Epoch: [109][10/12] Time 0.159 (0.318) Loss 0.1880 (0.1642) Prec@1 93.750 (94.567) | ||
711 | +Test: [0/2] Time 1.702 (1.702) Loss 0.6942 (0.6942) Prec@1 86.719 (86.719) | ||
712 | + * epoch: 109 Prec@1 88.288 | ||
713 | + * epoch: 109 Prec@1 88.288 | ||
714 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
715 | +Epoch: [110][0/12] Time 1.941 (1.941) Loss 0.1494 (0.1494) Prec@1 95.312 (95.312) | ||
716 | +Epoch: [110][10/12] Time 0.159 (0.320) Loss 0.2451 (0.2071) Prec@1 91.016 (92.330) | ||
717 | +Test: [0/2] Time 1.721 (1.721) Loss 0.5960 (0.5960) Prec@1 86.328 (86.328) | ||
718 | + * epoch: 110 Prec@1 86.486 | ||
719 | + * epoch: 110 Prec@1 86.486 | ||
720 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
721 | +Epoch: [111][0/12] Time 1.969 (1.969) Loss 0.1588 (0.1588) Prec@1 94.141 (94.141) | ||
722 | +Epoch: [111][10/12] Time 0.160 (0.323) Loss 0.1406 (0.1750) Prec@1 96.094 (93.395) | ||
723 | +Test: [0/2] Time 1.680 (1.680) Loss 0.6977 (0.6977) Prec@1 90.625 (90.625) | ||
724 | + * epoch: 111 Prec@1 90.090 | ||
725 | + * epoch: 111 Prec@1 90.090 | ||
726 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
727 | +Epoch: [112][0/12] Time 1.914 (1.914) Loss 0.2185 (0.2185) Prec@1 92.578 (92.578) | ||
728 | +Epoch: [112][10/12] Time 0.161 (0.318) Loss 0.1134 (0.1589) Prec@1 94.922 (94.389) | ||
729 | +Test: [0/2] Time 1.692 (1.692) Loss 0.6469 (0.6469) Prec@1 89.062 (89.062) | ||
730 | + * epoch: 112 Prec@1 89.489 | ||
731 | + * epoch: 112 Prec@1 89.489 | ||
732 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
733 | +Epoch: [113][0/12] Time 1.978 (1.978) Loss 0.1283 (0.1283) Prec@1 95.703 (95.703) | ||
734 | +Epoch: [113][10/12] Time 0.158 (0.323) Loss 0.2125 (0.1576) Prec@1 93.359 (94.815) | ||
735 | +Test: [0/2] Time 1.699 (1.699) Loss 0.9452 (0.9452) Prec@1 67.578 (67.578) | ||
736 | + * epoch: 113 Prec@1 65.766 | ||
737 | + * epoch: 113 Prec@1 65.766 | ||
738 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
739 | +Epoch: [114][0/12] Time 1.950 (1.950) Loss 0.2040 (0.2040) Prec@1 92.188 (92.188) | ||
740 | +Epoch: [114][10/12] Time 0.159 (0.322) Loss 0.2110 (0.1899) Prec@1 94.531 (93.253) | ||
741 | +Test: [0/2] Time 1.687 (1.687) Loss 0.5138 (0.5138) Prec@1 86.719 (86.719) | ||
742 | + * epoch: 114 Prec@1 86.486 | ||
743 | + * epoch: 114 Prec@1 86.486 | ||
744 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
745 | +Epoch: [115][0/12] Time 1.998 (1.998) Loss 0.1638 (0.1638) Prec@1 95.703 (95.703) | ||
746 | +Epoch: [115][10/12] Time 0.160 (0.331) Loss 0.2106 (0.1672) Prec@1 92.578 (94.354) | ||
747 | +Test: [0/2] Time 1.709 (1.709) Loss 0.8591 (0.8591) Prec@1 89.062 (89.062) | ||
748 | + * epoch: 115 Prec@1 89.790 | ||
749 | + * epoch: 115 Prec@1 89.790 | ||
750 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
751 | +Epoch: [116][0/12] Time 1.976 (1.976) Loss 0.1392 (0.1392) Prec@1 92.969 (92.969) | ||
752 | +Epoch: [116][10/12] Time 0.160 (0.336) Loss 0.2718 (0.2105) Prec@1 92.578 (93.253) | ||
753 | +Test: [0/2] Time 1.718 (1.718) Loss 0.4857 (0.4857) Prec@1 88.281 (88.281) | ||
754 | + * epoch: 116 Prec@1 88.589 | ||
755 | + * epoch: 116 Prec@1 88.589 | ||
756 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
757 | +Epoch: [117][0/12] Time 2.203 (2.203) Loss 0.1616 (0.1616) Prec@1 92.969 (92.969) | ||
758 | +Epoch: [117][10/12] Time 0.159 (0.345) Loss 0.1600 (0.1427) Prec@1 94.141 (94.638) | ||
759 | +Test: [0/2] Time 1.732 (1.732) Loss 0.8432 (0.8432) Prec@1 89.453 (89.453) | ||
760 | + * epoch: 117 Prec@1 90.691 | ||
761 | + * epoch: 117 Prec@1 90.691 | ||
762 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
763 | +Epoch: [118][0/12] Time 1.918 (1.918) Loss 0.1563 (0.1563) Prec@1 94.141 (94.141) | ||
764 | +Epoch: [118][10/12] Time 0.160 (0.319) Loss 0.1545 (0.1740) Prec@1 93.750 (93.892) | ||
765 | +Test: [0/2] Time 1.722 (1.722) Loss 0.4324 (0.4324) Prec@1 91.797 (91.797) | ||
766 | + * epoch: 118 Prec@1 90.991 | ||
767 | + * epoch: 118 Prec@1 90.991 | ||
768 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
769 | +Epoch: [119][0/12] Time 1.912 (1.912) Loss 0.1632 (0.1632) Prec@1 92.578 (92.578) | ||
770 | +Epoch: [119][10/12] Time 0.160 (0.317) Loss 0.1550 (0.1470) Prec@1 94.922 (94.638) | ||
771 | +Test: [0/2] Time 1.743 (1.743) Loss 0.5448 (0.5448) Prec@1 86.719 (86.719) | ||
772 | + * epoch: 119 Prec@1 85.886 | ||
773 | + * epoch: 119 Prec@1 85.886 | ||
774 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
775 | +Epoch: [120][0/12] Time 1.956 (1.956) Loss 0.1617 (0.1617) Prec@1 95.703 (95.703) | ||
776 | +Epoch: [120][10/12] Time 0.159 (0.322) Loss 0.1568 (0.1884) Prec@1 94.531 (93.466) | ||
777 | +Test: [0/2] Time 1.724 (1.724) Loss 0.4884 (0.4884) Prec@1 89.062 (89.062) | ||
778 | + * epoch: 120 Prec@1 89.189 | ||
779 | + * epoch: 120 Prec@1 89.189 | ||
780 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
781 | +Epoch: [121][0/12] Time 2.252 (2.252) Loss 0.0956 (0.0956) Prec@1 96.484 (96.484) | ||
782 | +Epoch: [121][10/12] Time 0.160 (0.350) Loss 0.0892 (0.1378) Prec@1 96.094 (94.425) | ||
783 | +Test: [0/2] Time 1.702 (1.702) Loss 0.9220 (0.9220) Prec@1 71.875 (71.875) | ||
784 | + * epoch: 121 Prec@1 72.372 | ||
785 | + * epoch: 121 Prec@1 72.372 | ||
786 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
787 | +Epoch: [122][0/12] Time 2.193 (2.193) Loss 0.1376 (0.1376) Prec@1 93.359 (93.359) | ||
788 | +Epoch: [122][10/12] Time 0.154 (0.344) Loss 0.1217 (0.1669) Prec@1 95.312 (94.034) | ||
789 | +Test: [0/2] Time 1.728 (1.728) Loss 0.4749 (0.4749) Prec@1 91.406 (91.406) | ||
790 | + * epoch: 122 Prec@1 90.090 | ||
791 | + * epoch: 122 Prec@1 90.090 | ||
792 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
793 | +Epoch: [123][0/12] Time 1.969 (1.969) Loss 0.1731 (0.1731) Prec@1 93.359 (93.359) | ||
794 | +Epoch: [123][10/12] Time 0.160 (0.332) Loss 0.1657 (0.1350) Prec@1 95.703 (95.419) | ||
795 | +Test: [0/2] Time 1.702 (1.702) Loss 0.4422 (0.4422) Prec@1 92.188 (92.188) | ||
796 | + * epoch: 123 Prec@1 90.991 | ||
797 | + * epoch: 123 Prec@1 90.991 | ||
798 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
799 | +Epoch: [124][0/12] Time 1.910 (1.910) Loss 0.1530 (0.1530) Prec@1 94.922 (94.922) | ||
800 | +Epoch: [124][10/12] Time 0.161 (0.335) Loss 0.1130 (0.1614) Prec@1 96.094 (94.744) | ||
801 | +Test: [0/2] Time 1.703 (1.703) Loss 0.6778 (0.6778) Prec@1 91.016 (91.016) | ||
802 | + * epoch: 124 Prec@1 91.592 | ||
803 | + * epoch: 124 Prec@1 91.592 | ||
804 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
805 | +Epoch: [125][0/12] Time 1.927 (1.927) Loss 0.1401 (0.1401) Prec@1 95.312 (95.312) | ||
806 | +Epoch: [125][10/12] Time 0.159 (0.334) Loss 0.1760 (0.1236) Prec@1 94.141 (95.774) | ||
807 | +Test: [0/2] Time 1.697 (1.697) Loss 0.8132 (0.8132) Prec@1 70.703 (70.703) | ||
808 | + * epoch: 125 Prec@1 72.072 | ||
809 | + * epoch: 125 Prec@1 72.072 | ||
810 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
811 | +Epoch: [126][0/12] Time 2.032 (2.032) Loss 0.1410 (0.1410) Prec@1 94.141 (94.141) | ||
812 | +Epoch: [126][10/12] Time 0.159 (0.330) Loss 0.1733 (0.1614) Prec@1 96.094 (93.928) | ||
813 | +Test: [0/2] Time 1.692 (1.692) Loss 0.4986 (0.4986) Prec@1 86.328 (86.328) | ||
814 | + * epoch: 126 Prec@1 87.688 | ||
815 | + * epoch: 126 Prec@1 87.688 | ||
816 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
817 | +Epoch: [127][0/12] Time 2.220 (2.220) Loss 0.1754 (0.1754) Prec@1 93.750 (93.750) | ||
818 | +Epoch: [127][10/12] Time 0.159 (0.346) Loss 0.1546 (0.1384) Prec@1 93.750 (94.922) | ||
819 | +Test: [0/2] Time 1.701 (1.701) Loss 0.9395 (0.9395) Prec@1 85.156 (85.156) | ||
820 | + * epoch: 127 Prec@1 86.787 | ||
821 | + * epoch: 127 Prec@1 86.787 | ||
822 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
823 | +Epoch: [128][0/12] Time 1.902 (1.902) Loss 0.1456 (0.1456) Prec@1 95.703 (95.703) | ||
824 | +Epoch: [128][10/12] Time 0.160 (0.322) Loss 0.1806 (0.1682) Prec@1 93.750 (94.496) | ||
825 | +Test: [0/2] Time 1.711 (1.711) Loss 0.5188 (0.5188) Prec@1 90.625 (90.625) | ||
826 | + * epoch: 128 Prec@1 90.691 | ||
827 | + * epoch: 128 Prec@1 90.691 | ||
828 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
829 | +Epoch: [129][0/12] Time 1.951 (1.951) Loss 0.0664 (0.0664) Prec@1 98.047 (98.047) | ||
830 | +Epoch: [129][10/12] Time 0.160 (0.323) Loss 0.1485 (0.1174) Prec@1 94.922 (96.058) | ||
831 | +Test: [0/2] Time 1.704 (1.704) Loss 0.6762 (0.6762) Prec@1 89.062 (89.062) | ||
832 | + * epoch: 129 Prec@1 90.090 | ||
833 | + * epoch: 129 Prec@1 90.090 | ||
834 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
835 | +Epoch: [130][0/12] Time 1.943 (1.943) Loss 0.2280 (0.2280) Prec@1 92.578 (92.578) | ||
836 | +Epoch: [130][10/12] Time 0.160 (0.332) Loss 0.2291 (0.1751) Prec@1 94.531 (93.786) | ||
837 | +Test: [0/2] Time 1.717 (1.717) Loss 0.4670 (0.4670) Prec@1 91.406 (91.406) | ||
838 | + * epoch: 130 Prec@1 91.892 | ||
839 | + * epoch: 130 Prec@1 91.892 | ||
840 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
841 | +Epoch: [131][0/12] Time 2.158 (2.158) Loss 0.1494 (0.1494) Prec@1 94.141 (94.141) | ||
842 | +Epoch: [131][10/12] Time 0.159 (0.341) Loss 0.1707 (0.1408) Prec@1 93.750 (94.744) | ||
843 | +Test: [0/2] Time 1.699 (1.699) Loss 0.4758 (0.4758) Prec@1 87.891 (87.891) | ||
844 | + * epoch: 131 Prec@1 87.387 | ||
845 | + * epoch: 131 Prec@1 87.387 | ||
846 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
847 | +Epoch: [132][0/12] Time 1.991 (1.991) Loss 0.1658 (0.1658) Prec@1 92.578 (92.578) | ||
848 | +Epoch: [132][10/12] Time 0.159 (0.326) Loss 0.1663 (0.1657) Prec@1 94.922 (94.212) | ||
849 | +Test: [0/2] Time 1.708 (1.708) Loss 0.5929 (0.5929) Prec@1 91.016 (91.016) | ||
850 | + * epoch: 132 Prec@1 91.892 | ||
851 | + * epoch: 132 Prec@1 91.892 | ||
852 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
853 | +Epoch: [133][0/12] Time 1.973 (1.973) Loss 0.0819 (0.0819) Prec@1 97.656 (97.656) | ||
854 | +Epoch: [133][10/12] Time 0.159 (0.324) Loss 0.1221 (0.1269) Prec@1 94.922 (95.135) | ||
855 | +Test: [0/2] Time 1.696 (1.696) Loss 0.4731 (0.4731) Prec@1 92.578 (92.578) | ||
856 | + * epoch: 133 Prec@1 92.793 | ||
857 | + * epoch: 133 Prec@1 92.793 | ||
858 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
859 | +Epoch: [134][0/12] Time 1.971 (1.971) Loss 0.0957 (0.0957) Prec@1 98.047 (98.047) | ||
860 | +Epoch: [134][10/12] Time 0.160 (0.324) Loss 0.1335 (0.1506) Prec@1 93.359 (94.567) | ||
861 | +Test: [0/2] Time 1.715 (1.715) Loss 0.5484 (0.5484) Prec@1 91.797 (91.797) | ||
862 | + * epoch: 134 Prec@1 92.192 | ||
863 | + * epoch: 134 Prec@1 92.192 | ||
864 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
865 | +Epoch: [135][0/12] Time 2.178 (2.178) Loss 0.1302 (0.1302) Prec@1 93.750 (93.750) | ||
866 | +Epoch: [135][10/12] Time 0.161 (0.342) Loss 0.1452 (0.1391) Prec@1 94.922 (94.922) | ||
867 | +Test: [0/2] Time 1.709 (1.709) Loss 0.7818 (0.7818) Prec@1 89.453 (89.453) | ||
868 | + * epoch: 135 Prec@1 89.489 | ||
869 | + * epoch: 135 Prec@1 89.489 | ||
870 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
871 | +Epoch: [136][0/12] Time 1.960 (1.960) Loss 0.1286 (0.1286) Prec@1 96.094 (96.094) | ||
872 | +Epoch: [136][10/12] Time 0.160 (0.323) Loss 0.0955 (0.1380) Prec@1 97.656 (94.709) | ||
873 | +Test: [0/2] Time 1.717 (1.717) Loss 0.5795 (0.5795) Prec@1 89.453 (89.453) | ||
874 | + * epoch: 136 Prec@1 90.090 | ||
875 | + * epoch: 136 Prec@1 90.090 | ||
876 | +Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
877 | +Epoch: [137][0/12] Time 1.968 (1.968) Loss 0.1092 (0.1092) Prec@1 94.531 (94.531) | ||
878 | +Epoch: [137][10/12] Time 0.160 (0.324) Loss 0.2211 (0.1232) Prec@1 93.359 (95.526) |
1 | +Number of model parameters: 154706 | ||
2 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
4 | + * Prec@1 98.193 | ||
5 | + * Prec@1 98.193 | ||
6 | +Best accuracy: 98.1927713600986 | ||
7 | +[validate_2020-03-26-17-26-07] done | ||
8 | +[validate_2020-03-26-17-26-07] done | ||
9 | +set Type | ||
10 | +Number of model parameters: 461559 | ||
11 | +=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
12 | +=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382) | ||
13 | + * Prec@1 95.495 | ||
14 | + * Prec@1 95.495 | ||
15 | +Best accuracy: 95.49549572460644 | ||
16 | +[validate_2020-03-26-17-26-14] done | ||
17 | +[validate_2020-03-26-17-26-14] done | ||
18 | +/home/yh9468/detection/data/Fourth_data/demo Test dir submitted | ||
19 | +start test using path : /home/yh9468/detection/data/Fourth_data/demo | ||
20 | +Test start | ||
21 | +loading checkpoint... | ||
22 | +checkpoint already loaded! | ||
23 | +start test | ||
24 | +data path directory is /home/yh9468/detection/data/Fourth_data/demo | ||
25 | +finish test | ||
26 | +/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp test file submitted | ||
27 | +start test using path : ('/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp', 'All Files(*)') | ||
28 | +Test start | ||
29 | +loading checkpoint... | ||
30 | +checkpoint already loaded! | ||
31 | +start test | ||
32 | +finish test |
1 | +Number of model parameters: 154706 | ||
2 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
4 | + * Prec@1 98.193 | ||
5 | + * Prec@1 98.193 | ||
6 | +Best accuracy: 98.1927713600986 | ||
7 | +[validate_2020-03-27-11-52-28] done | ||
8 | +[validate_2020-03-27-11-52-28] done | ||
9 | +start test using path : ../data/Fourth_data/demo | ||
10 | +Test start | ||
11 | +loading checkpoint... | ||
12 | +checkpoint already loaded! | ||
13 | +start test | ||
14 | +data path directory is ../data/Fourth_data/demo | ||
15 | +Inference time 21 images : 0.37514 | ||
16 | +finish test | ||
17 | +set Type | ||
18 | +start test using path : ../data/Fourth_data/demo | ||
19 | +Test start | ||
20 | +loading checkpoint... | ||
21 | +checkpoint already loaded! | ||
22 | +start test | ||
23 | +data path directory is ../data/Fourth_data/demo | ||
24 | +Inference time 21 images : 0.7917 | ||
25 | +finish test | ||
26 | +/home/yh9468/detection/data/Fourth_data/demo/demoset/1-1.bmp test file submitted | ||
27 | +start test using path : ('/home/yh9468/detection/data/Fourth_data/demo/demoset/1-1.bmp', 'All Files(*)') | ||
28 | +Test start | ||
29 | +loading checkpoint... | ||
30 | +checkpoint already loaded! | ||
31 | +start test | ||
32 | +Inference time 1 image : 0.03704 | ||
33 | +finish test |
1 | +using user's checkpoint ('E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar', 'All Files(*)') | ||
2 | +Number of model parameters: 154706 | ||
3 | +=> loading checkpoint '('E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar', 'All Files(*)')' | ||
4 | +Fatal error in main loop | ||
5 | +Traceback (most recent call last): | ||
6 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 289, in _check_seekable | ||
7 | + f.seek(f.tell()) | ||
8 | +AttributeError: 'tuple' object has no attribute 'seek' | ||
9 | + | ||
10 | +During handling of the above exception, another exception occurred: | ||
11 | + | ||
12 | +Traceback (most recent call last): | ||
13 | + File "E:\code\detection\trainer\test.py", line 256, in main | ||
14 | + run_model(args, q) | ||
15 | + File "E:\code\detection\trainer\test.py", line 328, in run_model | ||
16 | + checkpoint = torch.load(args['checkpoint']) | ||
17 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 525, in load | ||
18 | + with _open_file_like(f, 'rb') as opened_file: | ||
19 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 217, in _open_file_like | ||
20 | + return _open_buffer_reader(name_or_buffer) | ||
21 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 202, in __init__ | ||
22 | + _check_seekable(buffer) | ||
23 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 292, in _check_seekable | ||
24 | + raise_err_msg(["seek", "tell"], e) | ||
25 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 285, in raise_err_msg | ||
26 | + raise type(e)(msg) | ||
27 | +AttributeError: 'tuple' object has no attribute 'seek'. You can only torch.load from a file that is seekable. Please pre-load the data into a buffer like io.BytesIO and try to load from it instead. | ||
28 | +[validate_2020-03-31-14-53-49] failed |
1 | +using user's checkpoint E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar | ||
2 | +Number of model parameters: 154706 | ||
3 | +=> loading checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
4 | +Fatal error in main loop | ||
5 | +Traceback (most recent call last): | ||
6 | + File "E:\code\detection\trainer\test.py", line 261, in main | ||
7 | + run_model(args, q) | ||
8 | + File "E:\code\detection\trainer\test.py", line 333, in run_model | ||
9 | + checkpoint = torch.load(args['checkpoint']) | ||
10 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 529, in load | ||
11 | + return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args) | ||
12 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 702, in _legacy_load | ||
13 | + result = unpickler.load() | ||
14 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 665, in persistent_load | ||
15 | + deserialized_objects[root_key] = restore_location(obj, location) | ||
16 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 156, in default_restore_location | ||
17 | + result = fn(storage, location) | ||
18 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 132, in _cuda_deserialize | ||
19 | + device = validate_cuda_device(location) | ||
20 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 116, in validate_cuda_device | ||
21 | + raise RuntimeError('Attempting to deserialize object on a CUDA ' | ||
22 | +RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU. | ||
23 | +[validate_2020-03-31-14-58-23] failed |
1 | +using user's checkpoint E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar | ||
2 | +Number of model parameters: 154706 | ||
3 | +=> loading checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
4 | +=> loaded checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
5 | + * Prec@1 91.867 | ||
6 | + * Prec@1 91.867 | ||
7 | +Best accuracy: 95.90643257007264 |
1 | +using user's checkpoint E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar | ||
2 | +Number of model parameters: 154706 | ||
3 | +=> loading checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
4 | +=> loaded checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
5 | +Test: [0/2] Time 23.714 (23.714) Loss 0.1847 (0.1847) Prec@1 92.969 (92.969) | ||
6 | +Test: [1/2] Time 0.464 (12.089) Loss 0.2262 (0.1942) Prec@1 88.158 (91.867) | ||
7 | + * Prec@1 91.867 | ||
8 | + * Prec@1 91.867 | ||
9 | +Best accuracy: 95.90643257007264 | ||
10 | +[validate_2020-03-31-15-08-03] done | ||
11 | +[validate_2020-03-31-15-08-03] done |
1 | +using default checkpoint |
1 | +using default checkpoint | ||
2 | +Number of model parameters: 154706 | ||
3 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
4 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
5 | +Fatal error in main loop | ||
6 | +Traceback (most recent call last): | ||
7 | + File "E:\code\detection\trainer\test.py", line 271, in main | ||
8 | + run_model(args, q) | ||
9 | + File "E:\code\detection\trainer\test.py", line 359, in run_model | ||
10 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
11 | + File "E:\code\detection\trainer\test.py", line 393, in validate | ||
12 | + if args['predict']['save']: | ||
13 | +KeyError: 'save' | ||
14 | +[validate_2020-04-01-18-17-52] failed |
1 | +Number of model parameters: 154706 | ||
2 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
4 | +Test: [0/1] Time 26.222 (26.222) Loss 0.2174 (0.2174) Prec@1 94.823 (94.823) | ||
5 | + * Prec@1 94.823 | ||
6 | + * Prec@1 94.823 | ||
7 | +Best accuracy: 98.1927713600986 | ||
8 | +[validate_2020-04-01-18-45-23] done | ||
9 | +[validate_2020-04-01-18-45-23] done | ||
10 | +start test using path : default checkpoint | ||
11 | +Test start | ||
12 | +using default checkpoint | ||
13 | +loading checkpoint... | ||
14 | +checkpoint already loaded! | ||
15 | +start test | ||
16 | +data path directory is ../data/Fourth_data/demo | ||
17 | +Inference time 120 images : 4.358 | ||
18 | +finish test | ||
19 | +start test using path : default checkpoint | ||
20 | +Test start | ||
21 | +using default checkpoint | ||
22 | +loading checkpoint... | ||
23 | +[Errno 2] No such file or directory: 'n' | ||
24 | +checkpoint already loaded! | ||
25 | +start test | ||
26 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. | ||
27 | +E:/code/detection/data/Fifth_data/All/Flip/1-1.bmp test file submitted | ||
28 | +start test using path : default checkpoint | ||
29 | +Test start | ||
30 | +using default checkpoint | ||
31 | +loading checkpoint... | ||
32 | +[Errno 2] No such file or directory: 'E' | ||
33 | +checkpoint already loaded! | ||
34 | +start test | ||
35 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. | ||
36 | +start test using path : default checkpoint | ||
37 | +Test start | ||
38 | +using default checkpoint | ||
39 | +loading checkpoint... | ||
40 | +[Errno 2] No such file or directory: 'E' | ||
41 | +checkpoint already loaded! | ||
42 | +start test | ||
43 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. |
1 | +using default checkpoint | ||
2 | +Number of model parameters: 154706 | ||
3 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
4 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
5 | +Test: [0/1] Time 27.498 (27.498) Loss 0.2174 (0.2174) Prec@1 94.823 (94.823) | ||
6 | + * Prec@1 94.823 | ||
7 | + * Prec@1 94.823 | ||
8 | +Best accuracy: 98.1927713600986 | ||
9 | +[validate_2020-04-01-20-18-55] done | ||
10 | +[validate_2020-04-01-20-18-55] done |
1 | +using default checkpoint | ||
2 | +Number of model parameters: 400114 | ||
3 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
4 | +Fatal error in main loop | ||
5 | +Traceback (most recent call last): | ||
6 | + File "E:\code\detection\trainer\test.py", line 274, in main | ||
7 | + run_model(args, q) | ||
8 | + File "E:\code\detection\trainer\test.py", line 351, in run_model | ||
9 | + model.load_state_dict(checkpoint['state_dict']) | ||
10 | + File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\module.py", line 830, in load_state_dict | ||
11 | + self.__class__.__name__, "\n\t".join(error_msgs))) | ||
12 | +RuntimeError: Error(s) in loading state_dict for DataParallel: | ||
13 | + Missing key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.6.0.weight", "module.features.6.1.weight", "module.features.6.1.bias", "module.features.6.1.running_mean", "module.features.6.1.running_var", "module.features.8.weight", "module.features.8.bias". | ||
14 | + Unexpected key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.5.1.num_batches_tracked", "module.features.7.weight", "module.features.7.bias". | ||
15 | +[validate_2020-04-01-20-21-14] failed |
1 | +using default checkpoint | ||
2 | +Number of model parameters: 154706 | ||
3 | +=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
4 | +=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877) | ||
5 | +Test: [0/1] Time 26.242 (26.242) Loss 0.2174 (0.2174) Prec@1 94.823 (94.823) | ||
6 | + * Prec@1 94.823 | ||
7 | + * Prec@1 94.823 | ||
8 | +Best accuracy: 98.1927713600986 | ||
9 | +[validate_2020-04-01-22-45-31] done | ||
10 | +[validate_2020-04-01-22-45-31] done | ||
11 | +set Type | ||
12 | +start test using path : ../data/Fourth_data/demo | ||
13 | +val start | ||
14 | +using default checkpoint | ||
15 | +Number of model parameters: 461559 | ||
16 | +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
17 | +printlog() missing 2 required positional arguments: 'logger' and 'q' | ||
18 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. |
logs/runs/ErrorType/eval.log
0 → 100644
1 | +Number of model parameters: 161111 | ||
2 | +=> loading checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 68) | ||
4 | +Fatal error in main loop | ||
5 | +Traceback (most recent call last): | ||
6 | + File "eval_binary_model.py", line 67, in main | ||
7 | + run_model(args) | ||
8 | + File "eval_binary_model.py", line 132, in run_model | ||
9 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args) | ||
10 | + File "eval_binary_model.py", line 172, in validate | ||
11 | + save_error_case(output.data, target, args, topk=(1,), input=input, save_correct=False) | ||
12 | + File "eval_binary_model.py", line 245, in save_error_case | ||
13 | + os.mkdir(f"eval_results/{args['task']}/error_case") | ||
14 | +FileNotFoundError: [Errno 2] No such file or directory: 'eval_results/ErrorType/error_case' | ||
15 | +[eval] failed | ||
16 | +Number of model parameters: 161111 | ||
17 | +=> loading checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
18 | +=> loaded checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 68) | ||
19 | +Test: [0/3] Time 1.011 (1.011) Loss 0.0242 (0.0242) Prec@1 99.219 (99.219) | ||
20 | + * Prec@1 98.519 | ||
21 | +Best accuracy: 98.51851829246239 | ||
22 | +[eval] done | ||
23 | +Number of model parameters: 161111 | ||
24 | +=> loading checkpoint 'output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
25 | +=> loaded checkpoint 'output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 48) | ||
26 | +Test: [0/3] Time 1.028 (1.028) Loss 0.0223 (0.0223) Prec@1 98.828 (98.828) | ||
27 | + * Prec@1 97.037 | ||
28 | +Best accuracy: 98.14814758300781 | ||
29 | +[eval] done | ||
30 | +Number of model parameters: 161111 | ||
31 | +=> loading checkpoint 'output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
32 | +=> loaded checkpoint 'output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 122) | ||
33 | +Test: [0/3] Time 1.032 (1.032) Loss 0.0093 (0.0093) Prec@1 99.609 (99.609) | ||
34 | + * Prec@1 99.074 | ||
35 | +Best accuracy: 99.07407401756004 | ||
36 | +[eval] done | ||
37 | +Number of model parameters: 161111 | ||
38 | +=> loading checkpoint 'output/ErrorType/54623_model=MobilenetV3-ep=3000-block=5/model_best.pth.tar' | ||
39 | +Fatal error in main loop | ||
40 | +Traceback (most recent call last): | ||
41 | + File "eval_binary_model.py", line 67, in main | ||
42 | + run_model(args) | ||
43 | + File "eval_binary_model.py", line 121, in run_model | ||
44 | + model.load_state_dict(checkpoint['state_dict']) | ||
45 | + File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 777, in load_state_dict | ||
46 | + self.__class__.__name__, "\n\t".join(error_msgs))) | ||
47 | +RuntimeError: Error(s) in loading state_dict for DataParallel: | ||
48 | + Missing key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.7.weight", "module.features.7.bias". | ||
49 | + Unexpected key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.1.num_batches_tracked", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.4.num_batches_tracked", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.5.conv.8.num_batches_tracked", "module.features.6.0.weight", "module.features.6.1.weight", "module.features.6.1.bias", "module.features.6.1.running_mean", "module.features.6.1.running_var", "module.features.6.1.num_batches_tracked", "module.features.8.weight", "module.features.8.bias". | ||
50 | +[eval] failed | ||
51 | +Number of model parameters: 161111 | ||
52 | +=> loading checkpoint 'output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
53 | +=> loaded checkpoint 'output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 62) | ||
54 | +Test: [0/3] Time 1.031 (1.031) Loss 0.0158 (0.0158) Prec@1 98.828 (98.828) | ||
55 | + * Prec@1 98.333 | ||
56 | +Best accuracy: 98.3333332768193 | ||
57 | +[eval] done | ||
58 | +Number of model parameters: 161111 | ||
59 | +=> loading checkpoint 'output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
60 | +=> loaded checkpoint 'output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 39) | ||
61 | +Test: [0/3] Time 1.031 (1.031) Loss 0.0463 (0.0463) Prec@1 97.266 (97.266) | ||
62 | + * Prec@1 95.556 | ||
63 | +Best accuracy: 98.14814758300781 | ||
64 | +[eval] done | ||
65 | +Number of model parameters: 161111 | ||
66 | +=> loading checkpoint 'output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
67 | +=> loaded checkpoint 'output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 39) | ||
68 | +Test: [0/1] Time 0.926 (0.926) Loss 0.1220 (0.1220) Prec@1 98.148 (98.148) | ||
69 | + * Prec@1 98.148 | ||
70 | +Best accuracy: 98.14814758300781 | ||
71 | +[eval] done | ||
72 | +Number of model parameters: 161111 | ||
73 | +=> loading checkpoint 'output/ErrorType/3843_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
74 | +=> loaded checkpoint 'output/ErrorType/3843_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 411) | ||
75 | +Test: [0/1] Time 0.525 (0.525) Loss 0.1709 (0.1709) Prec@1 98.148 (98.148) | ||
76 | + * Prec@1 98.148 | ||
77 | +Best accuracy: 98.14814758300781 | ||
78 | +[eval] done | ||
79 | +Number of model parameters: 161111 | ||
80 | +=> loading checkpoint 'output/ErrorType/2798_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
81 | +=> loaded checkpoint 'output/ErrorType/2798_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 52) | ||
82 | +Test: [0/1] Time 0.910 (0.910) Loss 0.1544 (0.1544) Prec@1 98.148 (98.148) | ||
83 | + * Prec@1 98.148 | ||
84 | +Best accuracy: 98.14814758300781 | ||
85 | +[eval] done | ||
86 | +Number of model parameters: 161111 | ||
87 | +=> loading checkpoint 'output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
88 | +=> loaded checkpoint 'output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 62) | ||
89 | +Test: [0/1] Time 0.917 (0.917) Loss 0.1561 (0.1561) Prec@1 98.148 (98.148) | ||
90 | + * Prec@1 98.148 | ||
91 | +Best accuracy: 98.14814758300781 | ||
92 | +[eval] done | ||
93 | +Number of model parameters: 161111 | ||
94 | +=> loading checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
95 | +=> loaded checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 68) | ||
96 | +Test: [0/1] Time 0.921 (0.921) Loss 0.2315 (0.2315) Prec@1 98.148 (98.148) | ||
97 | + * Prec@1 98.148 | ||
98 | +Best accuracy: 98.14814758300781 | ||
99 | +[eval] done | ||
100 | +Number of model parameters: 161111 | ||
101 | +=> loading checkpoint 'output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
102 | +=> loaded checkpoint 'output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 122) | ||
103 | +Test: [0/1] Time 0.909 (0.909) Loss 0.1854 (0.1854) Prec@1 98.148 (98.148) | ||
104 | + * Prec@1 98.148 | ||
105 | +Best accuracy: 98.14814758300781 | ||
106 | +[eval] done | ||
107 | +Number of model parameters: 161111 | ||
108 | +=> loading checkpoint 'output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
109 | +=> loaded checkpoint 'output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 48) | ||
110 | +Test: [0/1] Time 0.914 (0.914) Loss 0.1541 (0.1541) Prec@1 98.148 (98.148) | ||
111 | + * Prec@1 98.148 | ||
112 | +Best accuracy: 98.14814758300781 | ||
113 | +[eval] done | ||
114 | +Number of model parameters: 161111 | ||
115 | +=> loading checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
116 | +=> loaded checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2829) | ||
117 | +Test: [0/2] Time 0.719 (0.719) Loss 0.0056 (0.0056) Prec@1 100.000 (100.000) | ||
118 | + * Prec@1 99.794 | ||
119 | +Best accuracy: 99.79423805519387 | ||
120 | +[eval] done | ||
121 | +Number of model parameters: 161111 | ||
122 | +=> loading checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
123 | +=> loaded checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2829) | ||
124 | +Test: [0/3] Time 0.736 (0.736) Loss 0.1127 (0.1127) Prec@1 99.219 (99.219) | ||
125 | + * Prec@1 99.630 | ||
126 | +Best accuracy: 99.62962962962963 | ||
127 | +[eval] done | ||
128 | +Number of model parameters: 161111 | ||
129 | +=> loading checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
130 | +=> loaded checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2829) | ||
131 | +Test: [0/1] Time 0.552 (0.552) Loss 0.5223 (0.5223) Prec@1 98.148 (98.148) | ||
132 | + * Prec@1 98.148 | ||
133 | +Best accuracy: 98.14814758300781 | ||
134 | +[eval] done | ||
135 | +Number of model parameters: 161111 | ||
136 | +=> loading checkpoint 'output/ErrorType/22101_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
137 | +=> loaded checkpoint 'output/ErrorType/22101_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 1472) | ||
138 | +Test: [0/22] Time 0.883 (0.883) Loss 0.1991 (0.1991) Prec@1 95.703 (95.703) | ||
139 | +Test: [10/22] Time 0.134 (0.225) Loss 0.1018 (0.1421) Prec@1 97.656 (96.023) | ||
140 | +Test: [20/22] Time 0.173 (0.185) Loss 0.2016 (0.1328) Prec@1 94.531 (96.038) | ||
141 | + * Prec@1 96.052 | ||
142 | +Best accuracy: 96.0519101479833 | ||
143 | +[eval] done | ||
144 | +Number of model parameters: 461559 | ||
145 | +Number of model parameters: 161111 | ||
146 | +=> loading checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
147 | +=> loaded checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2829) | ||
148 | +Test: [0/3] Time 1.110 (1.110) Loss 0.0068 (0.0068) Prec@1 99.609 (99.609) | ||
149 | +Test: [1/3] Time 0.131 (0.620) Loss 0.1115 (0.0591) Prec@1 99.609 (99.609) | ||
150 | +Test: [2/3] Time 0.036 (0.426) Loss 0.0000 (0.0561) Prec@1 100.000 (99.630) | ||
151 | + * Prec@1 99.630 | ||
152 | +Best accuracy: 99.62962962962963 | ||
153 | +[eval] done | ||
154 | +Number of model parameters: 161111 | ||
155 | +=> loading checkpoint 'output/ErrorType/60092_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
156 | +=> loaded checkpoint 'output/ErrorType/60092_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
157 | +Test: [0/3] Time 0.769 (0.769) Loss 2.6269 (2.6269) Prec@1 57.031 (57.031) | ||
158 | +Test: [1/3] Time 0.117 (0.443) Loss 2.9399 (2.7834) Prec@1 51.953 (54.492) | ||
159 | +Test: [2/3] Time 0.038 (0.308) Loss 2.8111 (2.7848) Prec@1 53.571 (54.444) | ||
160 | + * Prec@1 54.444 | ||
161 | +Best accuracy: 98.14814758300781 | ||
162 | +[eval] done | ||
163 | +Number of model parameters: 161111 | ||
164 | +=> loading checkpoint 'output/ErrorType/60092_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
165 | +=> loaded checkpoint 'output/ErrorType/60092_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
166 | +Test: [0/3] Time 0.919 (0.919) Loss 0.0760 (0.0760) Prec@1 99.609 (99.609) | ||
167 | +Test: [1/3] Time 0.151 (0.535) Loss 0.0059 (0.0410) Prec@1 99.609 (99.609) | ||
168 | +Test: [2/3] Time 0.052 (0.374) Loss 0.0000 (0.0388) Prec@1 100.000 (99.630) | ||
169 | + * Prec@1 99.630 | ||
170 | +Best accuracy: 99.62962962962963 | ||
171 | +[eval] done | ||
172 | +Number of model parameters: 159830 | ||
173 | +=> loading checkpoint 'output/ErrorType/51891_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
174 | +=> loaded checkpoint 'output/ErrorType/51891_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 967) | ||
175 | +Test: [0/10] Time 1.848 (1.848) Loss 0.0940 (0.0940) Prec@1 96.875 (96.875) | ||
176 | +Test: [1/10] Time 0.033 (0.940) Loss 0.1398 (0.1169) Prec@1 96.484 (96.680) | ||
177 | +Test: [2/10] Time 0.020 (0.634) Loss 0.0827 (0.1055) Prec@1 98.828 (97.396) | ||
178 | +Test: [3/10] Time 0.022 (0.481) Loss 0.0788 (0.0988) Prec@1 98.047 (97.559) | ||
179 | +Test: [4/10] Time 0.022 (0.389) Loss 0.0631 (0.0917) Prec@1 98.047 (97.656) | ||
180 | +Test: [5/10] Time 0.026 (0.329) Loss 0.0881 (0.0911) Prec@1 97.266 (97.591) | ||
181 | +Test: [6/10] Time 0.026 (0.285) Loss 0.1012 (0.0925) Prec@1 97.656 (97.600) | ||
182 | +Test: [7/10] Time 0.051 (0.256) Loss 0.0947 (0.0928) Prec@1 96.484 (97.461) | ||
183 | +Test: [8/10] Time 0.051 (0.233) Loss 0.1026 (0.0939) Prec@1 96.094 (97.309) | ||
184 | +Test: [9/10] Time 0.161 (0.226) Loss 0.0329 (0.0900) Prec@1 99.363 (97.440) | ||
185 | + * Prec@1 97.440 | ||
186 | +Best accuracy: 97.44006518472844 | ||
187 | +[eval] done | ||
188 | +Number of model parameters: 159830 | ||
189 | +=> loading checkpoint 'output/ErrorType/51891_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
190 | +=> loaded checkpoint 'output/ErrorType/51891_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 1013) | ||
191 | +Test: [0/10] Time 1.797 (1.797) Loss 0.1185 (0.1185) Prec@1 98.047 (98.047) | ||
192 | +Test: [1/10] Time 0.056 (0.926) Loss 0.1336 (0.1260) Prec@1 95.703 (96.875) | ||
193 | +Test: [2/10] Time 0.024 (0.625) Loss 0.0722 (0.1081) Prec@1 98.047 (97.266) | ||
194 | +Test: [3/10] Time 0.025 (0.475) Loss 0.0833 (0.1019) Prec@1 97.656 (97.363) | ||
195 | +Test: [4/10] Time 0.049 (0.390) Loss 0.1214 (0.1058) Prec@1 97.656 (97.422) | ||
196 | +Test: [5/10] Time 0.023 (0.329) Loss 0.0616 (0.0984) Prec@1 98.438 (97.591) | ||
197 | +Test: [6/10] Time 0.034 (0.287) Loss 0.0906 (0.0973) Prec@1 96.875 (97.489) | ||
198 | +Test: [7/10] Time 0.032 (0.255) Loss 0.0922 (0.0967) Prec@1 96.875 (97.412) | ||
199 | +Test: [8/10] Time 0.025 (0.229) Loss 0.0631 (0.0929) Prec@1 98.047 (97.483) | ||
200 | +Test: [9/10] Time 0.127 (0.219) Loss 0.1669 (0.0977) Prec@1 96.815 (97.440) | ||
201 | + * Prec@1 97.440 | ||
202 | +Best accuracy: 97.4400648933172 | ||
203 | +[eval] done | ||
204 | +Number of model parameters: 159830 | ||
205 | +=> loading checkpoint 'output/ErrorType/16926_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
206 | +=> loaded checkpoint 'output/ErrorType/16926_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2813) | ||
207 | +Test: [0/10] Time 0.771 (0.771) Loss 0.0751 (0.0751) Prec@1 98.828 (98.828) | ||
208 | +Test: [1/10] Time 0.016 (0.393) Loss 0.0138 (0.0444) Prec@1 99.609 (99.219) | ||
209 | +Test: [2/10] Time 0.009 (0.265) Loss 0.0320 (0.0403) Prec@1 98.438 (98.958) | ||
210 | +Test: [3/10] Time 0.009 (0.201) Loss 0.1304 (0.0628) Prec@1 97.656 (98.633) | ||
211 | +Test: [4/10] Time 0.008 (0.163) Loss 0.0823 (0.0667) Prec@1 98.828 (98.672) | ||
212 | +Test: [5/10] Time 0.009 (0.137) Loss 0.0715 (0.0675) Prec@1 97.266 (98.438) | ||
213 | +Test: [6/10] Time 0.008 (0.119) Loss 0.0507 (0.0651) Prec@1 99.609 (98.605) | ||
214 | +Test: [7/10] Time 0.009 (0.105) Loss 0.0677 (0.0654) Prec@1 98.047 (98.535) | ||
215 | +Test: [8/10] Time 0.009 (0.094) Loss 0.0512 (0.0639) Prec@1 97.656 (98.438) | ||
216 | +Test: [9/10] Time 0.055 (0.090) Loss 0.1003 (0.0662) Prec@1 96.815 (98.334) | ||
217 | + * Prec@1 98.334 | ||
218 | +Best accuracy: 98.33401044390638 | ||
219 | +[eval] done | ||
220 | +Number of model parameters: 159830 | ||
221 | +=> loading checkpoint 'output/ErrorType/16926_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
222 | +=> loaded checkpoint 'output/ErrorType/16926_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2813) | ||
223 | +Test: [0/1] Time 0.756 (0.756) Loss 0.3889 (0.3889) Prec@1 94.309 (94.309) | ||
224 | + * Prec@1 94.309 | ||
225 | +Best accuracy: 94.30894470214844 | ||
226 | +[eval] done | ||
227 | +Number of model parameters: 460278 | ||
228 | +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
229 | +=> loaded checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1865) | ||
230 | +Test: [0/1] Time 2.244 (2.244) Loss 0.4449 (0.4449) Prec@1 92.952 (92.952) | ||
231 | + * Prec@1 92.952 | ||
232 | +Best accuracy: 92.9515380859375 | ||
233 | +[eval] done | ||
234 | +Number of model parameters: 460278 | ||
235 | +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
236 | +=> loaded checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1865) | ||
237 | +Test: [0/1] Time 2.112 (2.112) Loss 0.4449 (0.4449) Prec@1 92.952 (92.952) | ||
238 | + * Prec@1 92.952 | ||
239 | +Best accuracy: 92.9515380859375 | ||
240 | +[eval] done |
1 | +Number of model parameters: 460278 | ||
2 | +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
3 | +=> loaded checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1865) | ||
4 | +Test: [0/1] Time 29.993 (29.993) Loss 0.4449 (0.4449) Prec@1 92.952 (92.952) | ||
5 | + * Prec@1 92.952 | ||
6 | + * Prec@1 92.952 | ||
7 | +Best accuracy: 92.9515380859375 | ||
8 | +[validate_2020-03-31-16-28-46] done | ||
9 | +[validate_2020-03-31-16-28-46] done | ||
10 | +set All processing | ||
11 | +start test using path : ../data/Fourth_data/demo | ||
12 | +Test start | ||
13 | +[Errno 2] No such file or directory: 'configs/overall_config.yaml' | ||
14 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. |
1 | +using default checkpoint | ||
2 | +Number of model parameters: 460278 | ||
3 | +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
4 | +=> loaded checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1865) | ||
5 | +Fatal error in main loop | ||
6 | +Traceback (most recent call last): | ||
7 | + File "E:\code\detection\trainer\test.py", line 270, in main | ||
8 | + run_model(args, q) | ||
9 | + File "E:\code\detection\trainer\test.py", line 358, in run_model | ||
10 | + prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q) | ||
11 | + File "E:\code\detection\trainer\test.py", line 392, in validate | ||
12 | + if args['predict']['save']: | ||
13 | +KeyError: 'save' | ||
14 | +[validate_2020-03-31-18-52-03] failed |
1 | +using default checkpoint | ||
2 | +Number of model parameters: 461559 | ||
3 | +=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
4 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. | ||
5 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. | ||
6 | +Error(s) in loading state_dict for DataParallel: | ||
7 | + size mismatch for module.classifier.1.weight: copying a param with shape torch.Size([6, 1280]) from checkpoint, the shape in current model is torch.Size([7, 1280]). | ||
8 | + size mismatch for module.classifier.1.bias: copying a param with shape torch.Size([6]) from checkpoint, the shape in current model is torch.Size([7]). | ||
9 | +start test using path : ../data/Fourth_data/demo | ||
10 | +val start | ||
11 | +using default checkpoint | ||
12 | +Number of model parameters: 461559 | ||
13 | +=> loading checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
14 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. | ||
15 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. | ||
16 | +Error(s) in loading state_dict for DataParallel: | ||
17 | + Missing key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.6.conv.0.weight", "module.features.6.conv.1.weight", "module.features.6.conv.1.bias", "module.features.6.conv.1.running_mean", "module.features.6.conv.1.running_var", "module.features.6.conv.3.weight", "module.features.6.conv.4.weight", "module.features.6.conv.4.bias", "module.features.6.conv.4.running_mean", "module.features.6.conv.4.running_var", "module.features.6.conv.5.fc.0.weight", "module.features.6.conv.5.fc.2.weight", "module.features.6.conv.7.weight", "module.features.6.conv.8.weight", "module.features.6.conv.8.bias", "module.features.6.conv.8.running_mean", "module.features.6.conv.8.running_var", "module.features.7.0.weight", "module.features.7.1.weight", "module.features.7.1.bias", "module.features.7.1.running_mean", "module.features.7.1.running_var", "module.features.9.weight", "module.features.9.bias". | ||
18 | + Unexpected key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.5.1.num_batches_tracked", "module.features.7.weight", "module.features.7.bias". |
1 | +Number of model parameters: 461559 | ||
2 | +=> loading checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
3 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. | ||
4 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. | ||
5 | +Error(s) in loading state_dict for DataParallel: | ||
6 | + Missing key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.6.conv.0.weight", "module.features.6.conv.1.weight", "module.features.6.conv.1.bias", "module.features.6.conv.1.running_mean", "module.features.6.conv.1.running_var", "module.features.6.conv.3.weight", "module.features.6.conv.4.weight", "module.features.6.conv.4.bias", "module.features.6.conv.4.running_mean", "module.features.6.conv.4.running_var", "module.features.6.conv.5.fc.0.weight", "module.features.6.conv.5.fc.2.weight", "module.features.6.conv.7.weight", "module.features.6.conv.8.weight", "module.features.6.conv.8.bias", "module.features.6.conv.8.running_mean", "module.features.6.conv.8.running_var", "module.features.7.0.weight", "module.features.7.1.weight", "module.features.7.1.bias", "module.features.7.1.running_mean", "module.features.7.1.running_var", "module.features.9.weight", "module.features.9.bias". | ||
7 | + Unexpected key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.5.1.num_batches_tracked", "module.features.7.weight", "module.features.7.bias". |
1 | +using default checkpoint | ||
2 | +Number of model parameters: 161111 | ||
3 | +=> loading checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' | ||
4 | +=> loaded checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1884) | ||
5 | +Test: [0/1] Time 25.026 (25.026) Loss 0.2817 (0.2817) Prec@1 95.918 (95.918) | ||
6 | + * Prec@1 95.918 | ||
7 | + * Prec@1 95.918 | ||
8 | +Best accuracy: 95.91837310791016 | ||
9 | +[validate_2020-04-01-22-59-05] done | ||
10 | +[validate_2020-04-01-22-59-05] done | ||
11 | +set All processing | ||
12 | +start test using path : ../data/Fourth_data/demo | ||
13 | +val start | ||
14 | +using default checkpoint | ||
15 | +Number of model parameters: 462840 | ||
16 | +=> loading checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' | ||
17 | +=> loaded checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1617) | ||
18 | +Test: [0/3] Time 31.288 (31.288) Loss 0.2660 (0.2660) Prec@1 95.703 (95.703) | ||
19 | +Test: [1/3] Time 7.587 (19.437) Loss 0.3209 (0.2934) Prec@1 95.312 (95.508) | ||
20 | +Test: [2/3] Time 6.625 (15.167) Loss 0.1835 (0.2602) Prec@1 96.396 (95.777) | ||
21 | + * Prec@1 95.777 | ||
22 | + * Prec@1 95.777 | ||
23 | +Best accuracy: 95.77656669512757 | ||
24 | +[validate_2020-04-01-23-00-04] done | ||
25 | +[validate_2020-04-01-23-00-04] done | ||
26 | +set error | ||
27 | +Test를 수행하기 위해 데이터를 입력해 주세요. | ||
28 | +Test를 수행하기 위해 데이터를 입력해 주세요. |
1 | +using user's checkpoint E:/code/detection/trainer/output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar | ||
2 | +Number of model parameters: 161111 | ||
3 | +=> loading checkpoint 'E:/code/detection/trainer/output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' | ||
4 | +=> loaded checkpoint 'E:/code/detection/trainer/output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000) | ||
5 | +Test: [0/2] Time 26.118 (26.118) Loss 0.2720 (0.2720) Prec@1 93.359 (93.359) | ||
6 | +Test: [1/2] Time 0.848 (13.483) Loss 0.4744 (0.3686) Prec@1 91.453 (92.449) | ||
7 | + * Prec@1 92.449 | ||
8 | + * Prec@1 92.449 | ||
9 | +Best accuracy: 95.91836762720224 | ||
10 | +[validate_2020-04-03-17-24-24] done | ||
11 | +[validate_2020-04-03-17-24-24] done | ||
12 | +set All processing | ||
13 | +E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar checkpoint file submitted | ||
14 | +E:/code/detection/data/Fifth_data/All Test dir submitted | ||
15 | +val start | ||
16 | +using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
17 | +Number of model parameters: 462840 | ||
18 | +=> loading checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' | ||
19 | +=> loaded checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' (epoch 3000) | ||
20 | +Test: [0/3] Time 32.591 (32.591) Loss 0.1575 (0.1575) Prec@1 94.531 (94.531) | ||
21 | +Test: [1/3] Time 8.179 (20.385) Loss 0.2475 (0.2025) Prec@1 93.750 (94.141) | ||
22 | +Test: [2/3] Time 7.374 (16.048) Loss 0.4568 (0.2794) Prec@1 94.595 (94.278) | ||
23 | + * Prec@1 94.278 | ||
24 | + * Prec@1 94.278 | ||
25 | +Best accuracy: 96.04904700754774 | ||
26 | +[validate_2020-04-03-17-39-50] done | ||
27 | +[validate_2020-04-03-17-39-50] done | ||
28 | +E:/code/detection/data/Fifth_data/All/Empty/1-5.bmp test file submitted | ||
29 | +Test start | ||
30 | +start test using path : E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
31 | +using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar | ||
32 | +loading checkpoint... | ||
33 | +checkpoint already loaded! | ||
34 | +start test | ||
35 | +single_file_test() missing 1 required positional argument: 'q' | ||
36 | +실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요. |
main.py
0 → 100644
1 | +import argparse | ||
2 | +import random | ||
3 | +import os | ||
4 | +import cv2 | ||
5 | +import logging | ||
6 | +import datetime | ||
7 | + | ||
8 | +import torch | ||
9 | +import torch.nn as nn | ||
10 | +import torchvision.datasets as datasets | ||
11 | +import torchvision.transforms as transforms | ||
12 | +from torchvision.utils import save_image | ||
13 | + | ||
14 | +from model import mobilenetv3 | ||
15 | +from utils import get_args_from_yaml, MyImageFolder | ||
16 | +from get_mean_std import get_params | ||
17 | + | ||
18 | +## 해당 코드는 전체 inference를 모두 담은 code. | ||
19 | + | ||
# make Logger
logger = logging.getLogger(os.path.dirname(__name__))
logger.setLevel(logging.INFO)

# make Logger stream
streamHandler = logging.StreamHandler()
logger.addHandler(streamHandler)

# Output directories: one folder per predicted class.
# Bugfix: the original os.path.exists/os.mkdir chain crashed when the
# parent "eval_results" directory was missing (os.mkdir does not create
# parents) and was racy between the check and the create.
# os.makedirs(..., exist_ok=True) is race-free and creates all parents.
_RESULT_CLASSES = ('Normal', 'Crack', 'Empty', 'Flip', 'Pollute', 'Double', 'Leave', 'Scratch')
for _cls in _RESULT_CLASSES:
    os.makedirs(os.path.join('eval_results', 'main', _cls), exist_ok=True)
54 | + | ||
55 | + | ||
def main(Error_args, Error_Type_args):
    """Attach a per-run log file to the module logger, then run the pipeline.

    Args:
        Error_args: config dict for the binary Error/Normal model.
        Error_Type_args: config dict for the error-type classifier.
    """
    logdir = "logs/main/"
    # makedirs with exist_ok also creates the missing "logs" parent; the
    # original os.mkdir raised FileNotFoundError when "logs" did not exist.
    os.makedirs(logdir, exist_ok=True)
    # Bugfix: the original format '%Y%m%d-%H:%M:%S' contains ':' characters,
    # which are invalid in Windows filenames (this project runs on Windows).
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H-%M-%S')
    file_handler = logging.FileHandler(os.path.join(logdir, f"{timestamp}_log.log"))
    logger.addHandler(file_handler)

    run(Error_args, Error_Type_args)
64 | + | ||
def run(Error_args, Error_Type_args):
    """Two-stage inference over the test images.

    Stage 1: a binary "Error" model splits every image into Normal vs error.
    Stage 2: an "ErrorType" model assigns each error image one of seven
    defect classes. Each image is copied into eval_results/main/<class>/.

    Args:
        Error_args: config dict (model/gpu/train/data/predict sections).
        Error_Type_args: config dict for the error-type classifier.
    """
    # Hard-coded checkpoint paths override whatever the YAML supplied.
    Error_args['checkpoint'] = "output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar"
    Error_Type_args['checkpoint'] = "output/ErrorType/2798_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar"

    Error_model = mobilenetv3(n_class= Error_args['model']['class'], blocknum=Error_args['model']['blocks'])
    Error_Type_model = mobilenetv3(n_class=Error_Type_args['model']['class'], blocknum=Error_Type_args['model']['blocks'])

    gpus = Error_args['gpu']
    resize_size = Error_args['train']['size']

    torch.cuda.set_device(gpus[0])
    with torch.cuda.device(gpus[0]):
        Error_model = Error_model.cuda()
        Error_Type_model = Error_Type_model.cuda()

        # Checkpoints carry a "module." key prefix (saved from DataParallel
        # models), so wrap before load_state_dict.
        Error_model = torch.nn.DataParallel(Error_model, device_ids=gpus, output_device=gpus[0])
        Error_Type_model = torch.nn.DataParallel(Error_Type_model, device_ids=gpus, output_device=gpus[0])

        Error_checkpoint = torch.load(Error_args['checkpoint'])
        Error_Type_checkpoint = torch.load(Error_Type_args['checkpoint'])

        Error_model.load_state_dict(Error_checkpoint['state_dict'])
        Error_Type_model.load_state_dict(Error_Type_checkpoint['state_dict'])

        # Grayscale normalization statistics computed from the test set itself.
        mean, std = get_params(Error_args['data']['test'], resize_size)
        normalize = transforms.Normalize(mean=[mean[0].item()],
                                         std=[std[0].item()])

        transform = transforms.Compose([
            transforms.Resize((resize_size, resize_size)),
            transforms.Grayscale(),
            transforms.ToTensor(),
            normalize
        ])

        # MyImageFolder appears to yield ((img, label), (path, label)) pairs,
        # given the unpacking below — TODO confirm against utils.MyImageFolder.
        dataset = MyImageFolder(Error_args['data']['test'], transform)

        print(len(dataset))

        loader = torch.utils.data.DataLoader(
            dataset, batch_size=Error_args['predict']['batch-size'], shuffle=False,
            num_workers=Error_args['predict']['worker'], pin_memory=True
        )

        # NOTE(review): no model.eval() / torch.no_grad() here — dropout/BN run
        # in training mode and autograd history is retained. Confirm intended.
        for data in loader:
            (input, _), (path, _) = data
            input= input.cuda()

            output = Error_model(input)
            # Keep only the top-1 class index per sample.
            _, output = output.topk(1 ,1 ,True,True)

            # Dummy seed row so torch.cat has a tensor to append to; dropped
            # below via error_cases[1:].
            # NOTE(review): the 64x64 shape is hard-coded — this breaks when
            # resize_size != 64. Confirm the configured input size.
            error_cases = torch.ones((1,1,64,64)).cuda()
            new_paths = []

            error = 0
            normal = 0
            for idx in range(input.shape[0]):
                # if Error Case

                if output[idx] == 0:
                    error_cases = torch.cat((error_cases, input[idx:idx+1]), dim=0)
                    new_paths.append(path[idx])
                    error = error +1
                # Normal Case
                else:
                    # NOTE(review): split('/') drops no components on Windows
                    # '\\' paths — os.path.basename would be safer.
                    img = cv2.imread(path[idx])
                    cv2.imwrite(f"eval_results/main/Normal/{path[idx].split('/')[-1]}", img)
                    normal = normal+1

            print(f"error path : {len(new_paths)}")
            print(f"error : {error}")
            print(f"normal : {normal}")

            # Drop the dummy first row added above.
            error_cases = error_cases[1:]
            print(error_cases.shape[0])

            # Stage 2: classify only the images flagged as errors.
            output = Error_Type_model(error_cases)
            _, output = output.topk(1 ,1 ,True,True)

            # Copy each error image into the folder of its predicted class.
            for idx in range(error_cases.shape[0]):
                # Crack
                if output[idx] == 0:
                    img = cv2.imread(new_paths[idx])
                    cv2.imwrite(f"eval_results/main/Crack/{new_paths[idx].split('/')[-1]}", img)

                # Double
                elif output[idx] == 1:
                    img = cv2.imread(new_paths[idx])
                    cv2.imwrite(f"eval_results/main/Double/{new_paths[idx].split('/')[-1]}", img)

                # Empty
                elif output[idx] == 2:
                    img = cv2.imread(new_paths[idx])
                    cv2.imwrite(f"eval_results/main/Empty/{new_paths[idx].split('/')[-1]}", img)

                # Flip
                elif output[idx] == 3:
                    img = cv2.imread(new_paths[idx])
                    cv2.imwrite(f"eval_results/main/Flip/{new_paths[idx].split('/')[-1]}", img)

                # Leave
                elif output[idx] == 4:
                    img = cv2.imread(new_paths[idx])
                    cv2.imwrite(f"eval_results/main/Leave/{new_paths[idx].split('/')[-1]}", img)

                # Pollute
                elif output[idx] == 5:
                    img = cv2.imread(new_paths[idx])
                    cv2.imwrite(f"eval_results/main/Pollute/{new_paths[idx].split('/')[-1]}", img)

                # Scratch
                elif output[idx] == 6:
                    img = cv2.imread(new_paths[idx])
                    cv2.imwrite(f"eval_results/main/Scratch/{new_paths[idx].split('/')[-1]}", img)
179 | + | ||
180 | + | ||
if __name__ == '__main__':
    # Load the per-task YAML configs and run the two-stage pipeline.
    error_cfg = get_args_from_yaml("configs/Error_config.yml")
    error_type_cfg = get_args_from_yaml("configs/ErrorType_config.yml")
    main(error_cfg, error_type_cfg)
185 | + | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
make_noisy.py
0 → 100644
1 | +import torch | ||
2 | +import torch.nn as nn | ||
3 | +from model import mobilenetv3 | ||
4 | +import argparse | ||
5 | +import torchvision | ||
6 | +from torchvision.transforms import transforms | ||
7 | +import torchvision.datasets as datasets | ||
8 | +from augmentations import RandAugment | ||
9 | +from get_mean_std import get_params | ||
10 | +from torch.utils.data.sampler import SubsetRandomSampler | ||
11 | +import numpy as np | ||
12 | +import os | ||
13 | +import cv2 | ||
14 | +from utils import MyImageFolder | ||
15 | + | ||
class ConcatDataset(torch.utils.data.Dataset):
    """Zip-style combination of several datasets.

    Indexing returns one sample from EACH wrapped dataset as a tuple;
    shorter datasets wrap around modulo their own length, and the overall
    length is that of the longest dataset.
    """

    def __init__(self, *datasets):
        self.datasets = datasets

    def __getitem__(self, i):
        # Wrap the index for datasets shorter than the longest one.
        samples = [ds[i % len(ds)] for ds in self.datasets]
        return tuple(samples)

    def __len__(self):
        lengths = [len(ds) for ds in self.datasets]
        return max(lengths)
25 | + | ||
26 | + | ||
def make_dir():
    """Create the per-class folders the teacher writes pseudo-labeled images into.

    Bugfix/cleanup: replaces six copy-pasted exists/mkdir pairs with a loop
    using os.makedirs(..., exist_ok=True), which is race-free and also
    creates any missing parent directories (plain os.mkdir raised
    FileNotFoundError when '../data/Fourth_data/teacher_data' was absent).
    """
    base = '../data/Fourth_data/teacher_data'
    for class_name in ('Double', 'Flip', 'Scratch', 'Leave', 'Normal', 'Empty'):
        os.makedirs(os.path.join(base, class_name), exist_ok=True)
45 | + | ||
46 | + | ||
# Command-line interface for the Noisy Student training script.
parser = argparse.ArgumentParser(description='Process make noisy student model')
parser.add_argument('--checkpoint_path', type=str, help='checkpoint path')
parser.add_argument('--size', type=int, help='resize integer of input')
parser.add_argument('--batch_size', type=int, default=256,help='set batch size')
parser.add_argument('--teacher_checkpoint_path', type=str, help='teacher first checkpoint path')
parser.add_argument('--Labeled_dataset_path', default='../data/Fourth_data/noisy_data/Labeled', type=str, help='path of dataset')
parser.add_argument('--Unlabeled_dataset_path', default='../data/Fourth_data/noisy_data/Unlabeled', type=str, help='path of unlabeled dataset')
parser.add_argument('--num_workers', default=8, type=int, help="number of gpu worker")
parser.add_argument('--epochs', default=350, type=int, help='epoch')
parser.add_argument('--finetune_epochs', default=2, type=int, help='finetuning epochs')
parser.add_argument('--data_save_path', default='../data/Fourth_data/teacher_data', type=str, help='teacher save unlabeled data in this path')
args = parser.parse_args()

# Echo the parsed configuration so it appears in the run log.
print(args)
61 | + | ||
# RandAugment hyperparameters N and M taken from the Noisy Student paper
# (https://arxiv.org/pdf/1911.04252.pdf).
Aug_number = 2
Aug_magnitude = 27

# Candidate depths for the customized MobileNetV3 student networks.
blocks = [4,5,6,7,8]

# data loader parameters
kwargs = {'num_workers': args.num_workers, 'pin_memory': True}

# Per-split grayscale mean/std computed from the images on disk.
Labeled_mean, Labeled_std = get_params(args.Labeled_dataset_path, args.size)
Unlabeled_mean, Unlabeled_std = get_params(args.Unlabeled_dataset_path, args.size)

transform_labeled = transforms.Compose([
    transforms.Resize((args.size, args.size)),
    transforms.RandomCrop(args.size, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(Labeled_mean[0].item(), Labeled_std[0].item())
])

# The teacher judges raw data, so no extra augmentation should be needed here.
# NOTE(review): despite that stated intent, RandomCrop/RandomHorizontalFlip
# ARE applied below — confirm whether they should be removed.
transform_unlabeled = transforms.Compose([
    transforms.Resize((args.size, args.size)),
    transforms.RandomCrop(args.size, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(Unlabeled_mean[0].item(), Unlabeled_std[0].item())
])

# Add RandAugment with N, M (hyperparameters) — labeled data only.
transform_labeled.transforms.insert(0, RandAugment(Aug_number, Aug_magnitude))

# set dataset
Labeled_dataset = datasets.ImageFolder(args.Labeled_dataset_path, transform_labeled)
Unlabeled_dataset = MyImageFolder(args.Unlabeled_dataset_path, transform_unlabeled)

labeled_data_loader = torch.utils.data.DataLoader(
    Labeled_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)

unlabeled_data_loader = torch.utils.data.DataLoader(
    Unlabeled_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)

# The noisy teacher is configured smaller than the student (blocknum=4) and
# is not noised: dropout is set to 0.
noisy_teacher_model = mobilenetv3(n_class=2, dropout=0.0, blocknum=4)
checkpoint = torch.load(args.teacher_checkpoint_path)
noisy_teacher_model.load_state_dict(checkpoint['state_dict'])

# make loss function
criterion = nn.CrossEntropyLoss()

# make class directory
make_dir()

# Class-name -> index mapping from sorted folder names, matching the
# convention of torchvision.datasets.ImageFolder.
classes = os.listdir(args.data_save_path)
classes.sort()
118 | + | ||
for block in blocks:
    # The student is configured LARGER than the teacher (more blocks) and is
    # noised with dropout=0.5 and stochastic depth, per the paper.
    noisy_student_model = mobilenetv3(n_class=2, dropout=0.5, blocknum=block, stochastic=True)

    noisy_student_model.cuda()
    noisy_teacher_model.cuda()
    criterion.cuda()

    # RMSprop as in the official code (lr=0.128, decayed by the scheduler).
    # NOTE(review): weight_decay=0.9 looks like a copy of the momentum value;
    # the paper uses a far smaller decay — confirm before long runs.
    optimizer = torch.optim.RMSprop(noisy_student_model.parameters(), lr=0.128, weight_decay=0.9, momentum=0.9)

    # Exponential LR decay, like the official TF code.
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.7)

    for epoch in range(args.epochs):
        # --- Step 1: teacher pseudo-labels the unlabeled pool (hard labels) ---
        # Inference only: no_grad avoids building autograd graphs.
        # NOTE(review): consider noisy_teacher_model.eval() here so BatchNorm
        # uses running stats — left unchanged to preserve original behavior.
        with torch.no_grad():
            for data in unlabeled_data_loader:
                (unlabeled_input, _), (path, _) = data

                unlabeled_input = unlabeled_input.cuda()

                output = noisy_teacher_model(unlabeled_input)

                # Bugfix: the original called F.softmax, but F was never
                # imported (NameError at runtime). torch.softmax is identical.
                prob = torch.softmax(output, dim=1)

                # Bugfix: the inner loop used to shadow the outer loop's
                # `idx`; use a distinct name for the per-sample index.
                for sample_idx, p in enumerate(prob):
                    indices = torch.topk(p, 1).indices.tolist()

                    img = cv2.imread(path[sample_idx])

                    # os.path.basename handles both '/' and Windows '\\'
                    # separators, unlike the original split('/').
                    cv2.imwrite(f"{args.data_save_path}/{classes[indices[0]]}/{os.path.basename(path[sample_idx])}", img)

        # --- Step 2: rebuild a loader over the teacher-labeled data ---
        transform_teacher_data = transforms.Compose([
            transforms.Resize((args.size, args.size)),
            transforms.RandomCrop(args.size, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(Unlabeled_mean[0].item(), Unlabeled_std[0].item())
        ])
        transform_teacher_data.transforms.insert(0, RandAugment(Aug_number, Aug_magnitude))

        teacher_data = datasets.ImageFolder(args.data_save_path, transform_teacher_data)

        # Bugfix: the original wrapped two DataLoaders in the custom
        # ConcatDataset and fed that into another DataLoader, which yields
        # pairs of whole batches and breaks the (input, target) unpacking
        # below. Concatenate the *datasets* (pseudo-labeled + ground truth)
        # so the merged loader yields uniform (image, label) batches.
        merged_dataset = torch.utils.data.ConcatDataset([teacher_data, Labeled_dataset])

        merged_data_loader = torch.utils.data.DataLoader(
            merged_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)

        # --- Step 3: train the student on merged data ---
        # Hard pseudo-labels for now. TODO: soft labeling as in the paper.
        for i, (input, target) in enumerate(merged_data_loader):
            input = input.cuda()
            target = target.cuda()

            output = noisy_student_model(input)

            # Bugfix: CrossEntropyLoss expects (logits, targets); the
            # original passed them swapped.
            loss = criterion(output, target)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # The paper decays every 2.4 epochs; approximated as every 2 epochs.
        if epoch % 2 == 0:
            scheduler.step()

    # Iterative training: the trained student becomes the next teacher.
    noisy_teacher_model = noisy_student_model
... | \ No newline at end of file | ... | \ No newline at end of file |
make_script.py
0 → 100644
1 | +from model import mobilenetv3 | ||
2 | +import torch | ||
3 | +import torch.nn as nn | ||
4 | + | ||
# Export a trained MobileNetV3 checkpoint as a TorchScript file (CPU).

# Build the same architecture the checkpoint was trained with.
model = mobilenetv3(n_class=8, blocknum=6)

# The checkpoint carries a "module." key prefix (saved from a DataParallel
# model), so wrap before loading the state dict.
model = torch.nn.DataParallel(model)
device = torch.device('cpu')
checkpoint = torch.load('output/All/48860_model=MobilenetV3-ep=3000-block=6-class=8/model_best.pth.tar', map_location = device)

model.load_state_dict(checkpoint['state_dict'])

model.to(device)

# Inference mode so dropout/BatchNorm behave deterministically during tracing.
model.eval()

# Example input for tracing: grayscale 224x224.
# NOTE(review): a batch of 256 is large for a trace example; batch 1 would
# likely suffice and trace much faster — confirm downstream expectations.
x = torch.randn(256,1,224,224)

print(x.shape)

# Trace the underlying module (model.module) so the saved TorchScript file
# is free of the DataParallel wrapper.
jit_model = torch.jit.trace(model.module,x)

jit_model.save("mobilenetv3.pt")

#check jitModel is working
#output = jit_model(torch.ones(3,1,224,224))
#print(output)
... | \ No newline at end of file | ... | \ No newline at end of file |
model.py
0 → 100644
1 | +import torch | ||
2 | +import torch.nn as nn | ||
3 | +from torch.nn import functional as F | ||
4 | +import math | ||
5 | +from utils import stochastic_depth | ||
6 | +############# Mobile Net V3 ############# | ||
def make_divisible(x, divisible_by=8):
    """Round *x* up to the nearest multiple of *divisible_by*.

    Channel-sizing helper used by the MobileNet family so layer widths stay
    hardware-friendly multiples of 8.

    Args:
        x: positive number (int or float), e.g. a raw channel count.
        divisible_by: the multiple to round up to (default 8).

    Returns:
        int: the smallest multiple of *divisible_by* that is >= x.
    """
    # math.ceil replaces the original function-local numpy import: identical
    # result for scalars, without the array round-trip or the dependency.
    return int(math.ceil(x / divisible_by) * divisible_by)
10 | + | ||
def conv_bn(inp, oup, stride, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    """3x3 convolution -> normalization -> nonlinearity, as one Sequential.

    Args:
        inp: input channel count.
        oup: output channel count.
        stride: convolution stride.
        conv_layer / norm_layer / nlin_layer: layer classes to instantiate.
    """
    stages = [
        conv_layer(inp, oup, 3, stride, 1, bias=False),  # bias folded into BN
        norm_layer(oup),
        nlin_layer(inplace=True),
    ]
    return nn.Sequential(*stages)
17 | + | ||
18 | +# without bn | ||
def conv_block(inp, oup, stride, conv_layer=nn.Conv2d, nlin_layer=nn.LeakyReLU):
    """3x3 convolution (no BatchNorm) followed by a leaky nonlinearity.

    Bugfix: the nonlinearity is constructed as ``nlin_layer(0.1, inplace=True)``,
    which matches ``nn.LeakyReLU(negative_slope, inplace)`` but raised a
    TypeError with the original default ``nn.ReLU`` (ReLU takes only
    ``inplace``). The default is therefore changed to ``nn.LeakyReLU``;
    callers passing an explicit layer class are unaffected.

    Args:
        inp: input channel count.
        oup: output channel count.
        stride: convolution stride.
        conv_layer / nlin_layer: layer classes to instantiate.
    """
    return nn.Sequential(
        conv_layer(inp, oup, 3, stride, 1, bias=False),
        nlin_layer(0.1, inplace=True)
    )
24 | + | ||
def trans_conv_block(inp, oup, stride, conv_layer=nn.ConvTranspose2d, nlin_layer=nn.LeakyReLU):
    """Transposed convolution (kernel=2, stride=2: doubles H and W) plus a
    leaky nonlinearity.

    Bugfix: as with ``conv_block``, ``nlin_layer(0.1, inplace=True)`` matches
    ``nn.LeakyReLU`` but raised a TypeError with the original default
    ``nn.ReLU``. The default is changed to ``nn.LeakyReLU``; explicit callers
    are unaffected.

    NOTE(review): the ``stride`` parameter is accepted but ignored — the
    transposed conv is hard-coded to kernel 2 / stride 2. Confirm intended.
    """
    return nn.Sequential(
        conv_layer(inp, oup, 2, 2),  # upsamples spatial dims by exactly 2x
        nlin_layer(0.1, inplace=True)
    )
30 | + | ||
def conv_1x1_bn(inp, oup, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    """Pointwise (1x1) convolution -> normalization -> nonlinearity.

    Changes channel count only; spatial dimensions are preserved.
    """
    modules = (
        conv_layer(inp, oup, 1, 1, 0, bias=False),  # bias folded into BN
        norm_layer(oup),
        nlin_layer(inplace=True),
    )
    return nn.Sequential(*modules)
37 | + | ||
class Hswish(nn.Module):
    """Hard-swish activation from MobileNetV3: x * ReLU6(x + 3) / 6."""

    def __init__(self, inplace=True):
        super(Hswish, self).__init__()
        # Forwarded to relu6; the add creates a temporary, so inplace is safe.
        self.inplace = inplace

    def forward(self, x):
        gate = F.relu6(x + 3., inplace=self.inplace) / 6.
        return x * gate
45 | + | ||
46 | + | ||
class Hsigmoid(nn.Module):
    """Hard sigmoid: ReLU6(x + 3) / 6, a cheap piecewise-linear sigmoid."""

    def __init__(self, inplace=True):
        super(Hsigmoid, self).__init__()
        # Forwarded to relu6; the add creates a temporary, so inplace is safe.
        self.inplace = inplace

    def forward(self, x):
        clipped = F.relu6(x + 3., inplace=self.inplace)
        return clipped / 6.
54 | + | ||
55 | + | ||
class SEModule(nn.Module):
    """Squeeze-and-Excitation: global average pool -> bottleneck MLP -> per-channel gate."""

    def __init__(self, channel, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Bottleneck MLP: channel -> channel/reduction -> channel, gated by Hsigmoid.
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            Hsigmoid()
            # nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels, _, _ = x.size()
        weights = self.avg_pool(x).view(batch, channels)
        weights = self.fc(weights).view(batch, channels, 1, 1)
        # Scale every channel of x by its learned attention weight.
        return x * weights.expand_as(x)
73 | + | ||
74 | + | ||
class Identity(nn.Module):
    """No-op module. The ``channel`` argument is accepted and ignored so this
    class can stand in for SEModule when squeeze-excite is disabled."""

    def __init__(self, channel):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
81 | + | ||
class MobileBottleneck(nn.Module):
    """MobileNetV3 inverted-residual bottleneck.

    Structure: pointwise expand -> depthwise conv -> (optional SE) ->
    pointwise linear projection, with a residual shortcut when the block
    preserves shape, and optional stochastic depth on the conv branch.

    Args:
        inp: input channel count.
        oup: output channel count.
        kernel: depthwise kernel size (3 or 5).
        stride: depthwise stride (1 or 2).
        exp: expanded (hidden) channel count.
        se: if True, insert a squeeze-and-excitation layer after the depthwise conv.
        nl: non-linearity selector, 'RE' (ReLU) or 'HS' (hard-swish).
        stochastic: enable stochastic depth in forward().
        block_ratio: relative depth of this block (idx / total blocks); the
            stochastic-depth drop probability is 0.2 * block_ratio.
    """
    def __init__(self, inp, oup, kernel, stride, exp, se=False, nl='RE', stochastic=False, block_ratio=None):
        super(MobileBottleneck, self).__init__()
        assert stride in [1, 2]
        assert kernel in [3, 5]
        padding = (kernel - 1) // 2
        # Residual shortcut only when the block preserves both resolution and width.
        self.use_res_connect = stride == 1 and inp == oup
        self.use_stochastic = stochastic
        self.block_ratio = block_ratio

        conv_layer = nn.Conv2d
        norm_layer = nn.BatchNorm2d
        if nl == 'RE':
            nlin_layer = nn.ReLU # or ReLU6
        elif nl == 'HS':
            nlin_layer = Hswish
        else:
            raise NotImplementedError
        if se:
            SELayer = SEModule
        else:
            # Identity keeps the Sequential layout identical whether or not SE is used.
            SELayer = Identity

        self.conv = nn.Sequential(
            # pw
            conv_layer(inp, exp, 1, 1, 0, bias=False),
            norm_layer(exp),
            nlin_layer(inplace=True),
            # dw (groups=exp makes it depthwise)
            conv_layer(exp, exp, kernel, stride, padding, groups=exp, bias=False),
            norm_layer(exp),
            SELayer(exp),
            nlin_layer(inplace=True),
            # pw-linear (no activation after projection)
            conv_layer(exp, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        )

    def forward(self, x):
        # stochastic_depth (from utils) randomly drops the conv branch during
        # training with probability 0.2 * block_ratio — deeper blocks drop more.
        if self.use_res_connect:
            if self.use_stochastic:
                return x + stochastic_depth(self.conv(x),self.training, 0.2 * self.block_ratio)
            else:
                return x + self.conv(x)
        else:
            if self.use_stochastic:
                return stochastic_depth(self.conv(x), self.training, 0.2 * self.block_ratio)
            else:
                return self.conv(x)
131 | + | ||
132 | + | ||
class MobileNetV3(nn.Module):
    """Truncated MobileNetV3-small classifier for single-channel images.

    Only the first ``blocknum`` rows of ``self.mobileblock`` are instantiated,
    followed by a 1x1 conv + BN, global average pooling, a 1x1 conv up to
    ``last_channel`` features, and a dropout + linear classifier.

    Args:
        n_class (int): number of output classes.
        input_size (int): input height/width; asserted to be a multiple of 32.
        dropout (float): dropout probability before the final linear layer.
        width_mult (float): channel width multiplier.
        blocknum (int): how many bottleneck settings to keep (1..11).
        stochastic (bool): enable stochastic depth inside the bottlenecks.
    """
    def __init__(self, n_class, input_size=64, dropout=0.8, width_mult=1.0, blocknum=4, stochastic=False):
        super(MobileNetV3, self).__init__()
        input_channel = 16
        last_channel = 1280
        # Per-bottleneck settings: kernel, expansion, out-channels, SE, nonlinearity, stride.
        self.mobileblock = [
            # k, exp, c, se, nl, s,
            [3, 16, 16, True, 'RE', 2],
            [3, 72, 24, False, 'RE', 2],
            [3, 88, 24, False, 'RE', 1],
            [5, 96, 40, True, 'HS', 2],
            [5, 240, 40, True, 'HS', 1],
            [5, 240, 40, True, 'HS', 1],
            [5, 120, 48, True, 'HS', 1],
            [5, 144, 48, True, 'HS', 1],
            [5, 288, 96, True, 'HS', 2],
            [5, 576, 96, True, 'HS', 1],
            [5, 576, 96, True, 'HS', 1],
        ]

        # Keep only the first `blocknum` bottleneck configurations.
        mobile_setting = [
            self.mobileblock[idx] for idx in range(blocknum)
        ]
        # Expansion width of the last kept block; reused for the 1x1 head conv.
        self.last_exp = self.mobileblock[blocknum-1][1]

        # building first layer
        assert input_size % 32 == 0
        last_channel = make_divisible(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(1, input_channel, 2, nlin_layer=Hswish)] #input channel change
        self.classifier = []

        # building mobile blocks
        for idx, (k, exp, c, se, nl, s) in enumerate(mobile_setting):
            output_channel = make_divisible(c * width_mult)
            exp_channel = make_divisible(exp * width_mult)
            # block_ratio scales the stochastic-depth drop probability (deeper -> larger).
            self.features.append(MobileBottleneck(input_channel, output_channel, k, s, exp_channel, se, nl, stochastic=stochastic, block_ratio=float(idx) / float(blocknum)))
            input_channel = output_channel

        # building last several layers
        last_conv = make_divisible(self.last_exp * width_mult)
        self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
        # self.features.append(SEModule(last_conv)) # refer to paper Table2, but I think this is a mistake
        self.features.append(nn.AdaptiveAvgPool2d(1))
        self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
        self.features.append(Hswish(inplace=True))

        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)
        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout), # refer to paper section 6
            nn.Linear(last_channel, n_class),
        )

        self._initialize_weights()

    def forward(self, x):
        """Return class logits of shape (batch, n_class)."""
        x = self.features(x)
        # features ends with AdaptiveAvgPool2d(1), so the spatial dims are already
        # 1x1; this mean-mean just squeezes them to get (batch, channels).
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        """Kaiming init for convs, unit/zero for BN, small normal for linear layers."""
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
209 | + | ||
210 | + | ||
def mobilenetv3(pretrained=False, **kwargs):
    """Factory for MobileNetV3.

    When ``pretrained`` is True, loads weights from the hard-coded checkpoint
    file 'mobilenetv3_small_67.4.pth.tar' in the working directory.
    Remaining keyword arguments are forwarded to ``MobileNetV3``.
    """
    net = MobileNetV3(**kwargs)
    if not pretrained:
        return net
    state_dict = torch.load('mobilenetv3_small_67.4.pth.tar')
    net.load_state_dict(state_dict, strict=True)
    return net
218 | + | ||
219 | + | ||
220 | +### EFFICIENT NET ### | ||
221 | +from utils import ( | ||
222 | + round_filters, | ||
223 | + round_repeats, | ||
224 | + drop_connect, | ||
225 | + get_same_padding_conv2d, | ||
226 | + get_model_params, | ||
227 | + efficientnet_params, | ||
228 | + load_pretrained_weights, | ||
229 | + Swish, | ||
230 | + MemoryEfficientSwish, | ||
231 | +) | ||
232 | + | ||
233 | + | ||
class MBConvBlock(nn.Module):
    """
    Mobile Inverted Residual Bottleneck Block
    Args:
        block_args (namedtuple): BlockArgs, see above
        global_params (namedtuple): GlobalParam, see above
    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
    """

    def __init__(self, block_args, global_params):
        super().__init__()
        self._block_args = block_args
        # Convert from the TF momentum convention to PyTorch's (1 - momentum).
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        # SE is enabled only for a valid ratio in (0, 1].
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip # skip connection and drop connect

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Expansion phase
        inp = self._block_args.input_filters # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
        # With expand_ratio == 1 the expansion conv/BN are skipped entirely
        # (oup == inp in that case).
        if self._block_args.expand_ratio != 1:
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)

        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup,  # groups makes it depthwise
            kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)

        # Squeeze and Excitation layer, if desired
        # (squeeze width is computed from the *input* filters, per the reference impl).
        if self.has_se:
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)

        # Output phase: linear (un-activated) projection to the block's output width.
        final_oup = self._block_args.output_filters
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._swish = MemoryEfficientSwish()

    def forward(self, inputs, drop_connect_rate=None):
        """
        :param inputs: input tensor
        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
        :return: output of block
        """

        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = self._swish(self._bn0(self._expand_conv(inputs)))
        x = self._swish(self._bn1(self._depthwise_conv(x)))

        # Squeeze and Excitation: pool to 1x1, bottleneck MLP, sigmoid channel gate.
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_expand(self._swish(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x

        x = self._bn2(self._project_conv(x))

        # Skip connection and drop connect — only when the block preserves shape.
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs # skip connection
        return x

    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export)"""
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
314 | + | ||
315 | + | ||
class EfficientNet(nn.Module):
    """
    An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
    Args:
        blocks_args (list): A list of BlockArgs to construct blocks
        global_params (namedtuple): A set of GlobalParams shared between blocks
    Example:
        model = EfficientNet.from_pretrained('efficientnet-b0')
    """

    def __init__(self, blocks_args=None, global_params=None):
        super().__init__()
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args

        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)

        # Batch norm parameters (TF-style momentum converted to PyTorch's convention)
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon

        # Stem
        in_channels = 1 # grayscale input — this stem was modified from the stock RGB (3-channel) version
        out_channels = round_filters(32, self._global_params) # number of output channels
        self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
        self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

        # Build blocks
        self._blocks = nn.ModuleList([])
        for block_args in self._blocks_args:

            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters, self._global_params),
                output_filters=round_filters(block_args.output_filters, self._global_params),
                num_repeat=round_repeats(block_args.num_repeat, self._global_params)
            )

            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(MBConvBlock(block_args, self._global_params))
            if block_args.num_repeat > 1:
                # Repeats keep the width and run at stride 1.
                block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(MBConvBlock(block_args, self._global_params))

        # Head
        in_channels = block_args.output_filters # output of final block
        out_channels = round_filters(1280, self._global_params)
        self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)

        # Final linear layer
        self._avg_pooling = nn.AdaptiveAvgPool2d(1)
        self._dropout = nn.Dropout(self._global_params.dropout_rate)
        self._fc = nn.Linear(out_channels, self._global_params.num_classes)
        self._swish = MemoryEfficientSwish()

    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export)"""
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
        for block in self._blocks:
            block.set_swish(memory_efficient)


    def extract_features(self, inputs):
        """ Returns output of the final convolution layer """

        # Stem
        x = self._swish(self._bn0(self._conv_stem(inputs)))

        # Blocks — drop-connect rate grows linearly with block depth.
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)

        # Head
        x = self._swish(self._bn1(self._conv_head(x)))

        return x

    def forward(self, inputs):
        """ Calls extract_features to extract features, applies final linear layer, and returns logits. """
        bs = inputs.size(0)
        # Convolution layers
        x = self.extract_features(inputs)

        # Pooling and final linear layer
        x = self._avg_pooling(x)
        x = x.view(bs, -1)
        x = self._dropout(x)
        x = self._fc(x)
        return x

    @classmethod
    def from_name(cls, model_name, override_params=None):
        """Construct an EfficientNet from its name (e.g. 'efficientnet-b0')."""
        cls._check_model_name_is_valid(model_name)
        blocks_args, global_params = get_model_params(model_name, override_params)
        return cls(blocks_args, global_params)

    @classmethod
    def from_pretrained(cls, model_name, advprop=False, num_classes=1000, in_channels=3):
        """Construct a model and load pretrained ImageNet weights.

        The FC layer is only loaded when num_classes == 1000; a non-RGB stem
        is re-created (randomly initialized) when in_channels != 3.
        """
        model = cls.from_name(model_name, override_params={'num_classes': num_classes})
        load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000), advprop=advprop)
        if in_channels != 3:
            Conv2d = get_same_padding_conv2d(image_size = model._global_params.image_size)
            out_channels = round_filters(32, model._global_params)
            model._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
        return model

    @classmethod
    def get_image_size(cls, model_name):
        """Return the native input resolution for the named variant."""
        cls._check_model_name_is_valid(model_name)
        _, _, res, _ = efficientnet_params(model_name)
        return res

    @classmethod
    def _check_model_name_is_valid(cls, model_name):
        """ Validates model name. """
        valid_models = ['efficientnet-b'+str(i) for i in range(9)]
        if model_name not in valid_models:
            raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
442 | + | ||
class AutoEncoder(nn.Module):
    """Six-stage convolutional autoencoder for single-channel images.

    Encoder: 6 x (conv -> conv -> 2x2 max-pool), halving H/W at every stage
    (64x spatial reduction overall). Decoder mirrors it with 6 x
    (upsample -> conv -> conv), restoring the original resolution.
    Dropout2d(0.3) sits between stages (never after the last one).
    """

    def __init__(self, input_channel=1):
        super(AutoEncoder, self).__init__()
        # NOTE(review): stored but not forwarded to make_encoder_layers, which
        # uses its own input_channel default — confirm intent before relying on it.
        self.input_channel = input_channel
        self.encoder, self.decoder = self.make_encoder_layers()

    def make_encoder_layers(self, input_channel=1, layer_num=6):
        """Build and return (encoder, decoder) as nn.Sequential modules."""
        encoder_output_channels = [64, 56, 48, 32, 24, 20]
        decoder_output_channels = [24, 32, 48, 56, 64, 1]
        encoder = []
        decoder = []

        # Encoder: two conv blocks then a pool per stage.
        for i in range(layer_num):
            encoder.append(conv_block(input_channel, encoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            encoder.append(conv_block(encoder_output_channels[i], encoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            encoder.append(nn.MaxPool2d(2, 2))
            if i != layer_num - 1:
                encoder.append(nn.Dropout2d(p=0.3))
            input_channel = encoder_output_channels[i]

        # Decoder: upsample then two conv blocks per stage.
        for i in range(layer_num):
            decoder.append(nn.Upsample(scale_factor=2))
            decoder.append(conv_block(input_channel, input_channel, 1, nlin_layer=nn.LeakyReLU))
            decoder.append(conv_block(input_channel, decoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            if i != layer_num - 1:
                decoder.append(nn.Dropout2d(p=0.3))
            input_channel = decoder_output_channels[i]

        return nn.Sequential(*encoder), nn.Sequential(*decoder)

    def forward(self, x):
        x = self.encoder(x)
        # Fix: removed a leftover debug print of the latent shape that spammed
        # stdout on every forward pass.
        x = self.decoder(x)
        return x
class AutoEncoder_s(nn.Module):
    """Shallow (3-stage) variant of AutoEncoder (8x spatial reduction).

    Bug fix: ``__init__`` previously called ``super(AutoEncoder, self).__init__()``,
    which raises ``TypeError: super(type, obj)`` because AutoEncoder_s is not a
    subclass of AutoEncoder — the class could never be instantiated. It now
    correctly calls ``super(AutoEncoder_s, self)``.
    """

    def __init__(self, input_channel=1):
        super(AutoEncoder_s, self).__init__()  # was: super(AutoEncoder, self) — TypeError
        # NOTE(review): stored but not forwarded to make_encoder_layers — confirm intent.
        self.input_channel = input_channel
        self.encoder, self.decoder = self.make_encoder_layers()

    def make_encoder_layers(self, input_channel=1, layer_num=3):
        """Build and return (encoder, decoder) as nn.Sequential modules."""
        encoder_output_channels = [64, 56, 48]
        decoder_output_channels = [56, 64, 1]
        encoder = []
        decoder = []

        # Encoder: two conv blocks then a 2x2 max-pool per stage.
        for i in range(layer_num):
            encoder.append(conv_block(input_channel, encoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            encoder.append(conv_block(encoder_output_channels[i], encoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            encoder.append(nn.MaxPool2d(2, 2))
            if i != layer_num - 1:
                encoder.append(nn.Dropout2d(p=0.3))
            input_channel = encoder_output_channels[i]

        # Decoder: upsample then two conv blocks per stage.
        for i in range(layer_num):
            decoder.append(nn.Upsample(scale_factor=2))
            decoder.append(conv_block(input_channel, input_channel, 1, nlin_layer=nn.LeakyReLU))
            decoder.append(conv_block(input_channel, decoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            if i != layer_num - 1:
                decoder.append(nn.Dropout2d(p=0.3))
            input_channel = decoder_output_channels[i]

        return nn.Sequential(*encoder), nn.Sequential(*decoder)

    def forward(self, x):
        x = self.encoder(x)
        # Fix: removed a leftover debug print of the latent shape.
        x = self.decoder(x)
        return x
523 | + | ||
524 | + | ||
class pytorch_autoencoder(nn.Module):
    """Small fixed-architecture autoencoder (8x spatial reduction, tanh output)."""

    def __init__(self):
        super(pytorch_autoencoder, self).__init__()
        # Encoder: two stride-2 convs plus a max-pool -> 8x downsampling.
        encoder_layers = [
            nn.Conv2d(1, 32, 2, stride=2, padding=0),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(32, 16, 2, stride=2, padding=0),
            nn.ReLU(True),
        ]
        # Decoder: three stride-2 transposed convs -> 8x upsampling, tanh output.
        decoder_layers = [
            nn.ConvTranspose2d(16, 32, 2, stride=2),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 64, 2, stride=2, padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 1, 2, stride=2, padding=0),
            nn.Tanh(),
        ]
        self.encoder = nn.Sequential(*encoder_layers)
        self.decoder = nn.Sequential(*decoder_layers)

    def forward(self, x):
        latent = self.encoder(x)
        return self.decoder(latent)
... | \ No newline at end of file | ... | \ No newline at end of file |
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
requirements.txt
0 → 100644
1 | +adabound==0.0.5 | ||
2 | +altgraph==0.17 | ||
3 | +click==7.1.1 | ||
4 | +cycler==0.10.0 | ||
5 | +future==0.18.2 | ||
6 | +kiwisolver==1.1.0 | ||
7 | +lxml==4.5.0 | ||
8 | +macholib==1.14 | ||
9 | +matplotlib==2.2.4 | ||
10 | +numpy==1.18.1 | ||
11 | +opencv-python==4.2.0.32 | ||
12 | +pandas==1.0.3 | ||
13 | +pefile==2019.4.18 | ||
14 | +Pillow==6.2.2 | ||
15 | +protobuf==3.11.3 | ||
16 | +PyInstaller==3.6 | ||
17 | +pyparsing==2.4.6 | ||
18 | +PyQt5==5.14.1 | ||
19 | +PyQt5-sip==12.7.1 | ||
20 | +python-dateutil==2.8.1 | ||
21 | +pytz==2019.3 | ||
22 | +pywin32-ctypes==0.2.0 | ||
23 | +PyYAML==5.3.1 | ||
24 | +scipy==1.4.1 | ||
25 | +six==1.14.0 | ||
26 | +tensorboard-logger==0.1.0 | ||
27 | +torch==1.4.0+cpu | ||
28 | +torchvision==0.2.2.post3 | ||
29 | +tqdm==4.44.1 |
test.py
0 → 100644
1 | +import argparse | ||
2 | +import csv | ||
3 | +import logging | ||
4 | +import os | ||
5 | +import shutil | ||
6 | +import time | ||
7 | +import sys | ||
8 | +import zipfile | ||
9 | +from torch.utils.data.sampler import SubsetRandomSampler | ||
10 | +import numpy as np | ||
11 | +import PIL | ||
12 | +import torch | ||
13 | +import torch.backends.cudnn as cudnn | ||
14 | +import torch.nn as nn | ||
15 | +import torch.nn.parallel | ||
16 | +import torch.optim | ||
17 | +import torch.utils.data | ||
18 | +import torchvision.datasets as datasets | ||
19 | +import torchvision.transforms as transforms | ||
20 | +import torch.nn.functional as F | ||
21 | + | ||
22 | +import cv2 | ||
23 | +import matplotlib.pyplot as plt | ||
24 | +import pandas as pd | ||
25 | +from get_mean_std import get_params | ||
26 | + | ||
27 | +sys.path.append(os.path.join(os.path.dirname(__name__))) | ||
28 | + | ||
29 | +from model import mobilenetv3, EfficientNet | ||
30 | +from torchvision.utils import save_image | ||
31 | +from focal_loss import FocalLoss | ||
32 | +from visualize.grad_cam import make_grad_cam | ||
33 | +from utils import accuracy, AverageMeter, get_args_from_yaml, MyImageFolder, printlog, FastDataLoader | ||
34 | +from PIL import Image | ||
35 | +import torchvision.transforms.functional as TF | ||
36 | +#from utils import restapi, preprocessing | ||
37 | + | ||
# NOTE(review): `global` at module scope is a no-op — these names are only
# declared here, never assigned in this visible section.
global error_case_idx, correct_case_idx

# os.path.dirname(__name__) yields "" for a top-level module, which makes
# getLogger return the root logger — presumably getLogger(__name__) was
# intended; verify before changing, since handlers below attach to it.
logger = logging.getLogger(os.path.dirname(__name__))
logger.setLevel(logging.INFO)

# Echo log records to the console.
streamHandler = logging.StreamHandler()
logger.addHandler(streamHandler)
45 | + | ||
def make_type_dir():
    """Create the test_result/Type/<class> output directories.

    Fix: adds the previously missing 'Pollute' directory — the Type task's
    class list includes 'Pollute', so images predicted as that class had no
    folder to be written into. Also replaces the repetitive exists/mkdir
    pairs with os.makedirs(exist_ok=True), which creates parents atomically.
    """
    for name in ('Double', 'Flip', 'Scratch', 'Leave', 'Empty', 'Crack', 'Pollute'):
        os.makedirs(os.path.join('test_result', 'Type', name), exist_ok=True)
63 | + | ||
def make_all_dir():
    """Create the test_result/All/<class> output directories.

    Fixes: removes the duplicated 'Normal' exists/mkdir pair, and adds the
    previously missing 'Pollute' directory (the All task's class list
    includes 'Pollute'). Uses os.makedirs(exist_ok=True) to create parents
    in one call instead of repeated exists/mkdir checks.
    """
    for name in ('Double', 'Flip', 'Scratch', 'Leave', 'Normal', 'Empty', 'Crack', 'Pollute'):
        os.makedirs(os.path.join('test_result', 'All', name), exist_ok=True)
85 | + | ||
def make_error_dir():
    """Create the test_result/Error/{Normal,Error} output directories."""
    # Parents are listed before children so each mkdir has its parent in place.
    for path in ('test_result', 'test_result/Error',
                 'test_result/Error/Normal', 'test_result/Error/Error'):
        if not os.path.exists(path):
            os.mkdir(path)
95 | + | ||
def get_savepath_classes_args(mode):
    """Map a task mode to its (save_path, class names, parsed config args).

    mode: "Error" = binary error/normal, "Type" = error-type only,
    anything else = all classes.
    """
    if mode == "Error":
        return ('./test_result/Error',
                ['Error', 'Normal'],
                get_args_from_yaml("configs/Error_config.yml"))
    if mode == "Type":
        return ('./test_result/Type',
                ['Crack', 'Double', 'Empty', 'Flip', 'Leave', 'Pollute', 'Scratch'],
                get_args_from_yaml('configs/ErrorType_config.yml'))
    return ('./test_result/All',
            ['Crack', 'Double', 'Empty', 'Flip', 'Leave', 'Normal', 'Pollute', 'Scratch'],
            get_args_from_yaml('configs/All_config.yml'))
113 | + | ||
114 | + | ||
115 | +# 여러개의 인풋을 Test 수행할 때 사용되는 함수. | ||
def test(testloader, model, mode):
    """Run inference over a DataLoader and save every image into the folder of
    its top-1 predicted class, named with its top-2 classes and probabilities.

    testloader: loader yielding ((images, _), (paths, _)) pairs (MyImageFolder).
    model: classification network producing logits.
    mode: task mode ("Error", "Type" or all) — selects save path and class list.
    """
    with torch.no_grad():
        save_path, classes, _ = get_savepath_classes_args(mode)
        model.eval()
        for _, data in enumerate(testloader):
            (input, _), (path, _) = data
            if torch.cuda.is_available():
                input = input.cuda()

            output = model(input)
            prob = F.softmax(output, dim=1)

            for idx, p in enumerate(prob):
                # Fix: topk was computed twice per sample (once for values,
                # once for indices); compute it once and unpack both.
                values, indices = torch.topk(p, 2)
                values = values.tolist()    # top-2 probabilities
                indices = indices.tolist()  # top-2 class indices
                img = cv2.imread(path[idx])
                cv2.imwrite(f"{save_path}/{classes[indices[0]]}/{classes[indices[0]]}={values[0]}__{classes[indices[1]]}={values[1]}.bmp", img)
133 | + | ||
134 | +# Test input이 하나의 파일일 때 사용되는 함수. | ||
135 | +# Path = 데이터 원본의 경로, mode = 수행하는 Task. | ||
def single_file_test(input, model, path, mode, q):
    """Classify a single preprocessed image tensor and save the original image
    into the folder of its top-1 predicted class.

    input: preprocessed tensor of shape (1, C, H, W).
    path: filesystem path of the original image (re-read with cv2 for saving).
    mode: task mode — selects save path and class list.
    q: queue receiving progress/timing strings for the UI thread.
    """
    with torch.no_grad():
        save_path, classes, _ = get_savepath_classes_args(mode)
        model.eval()
        if torch.cuda.is_available():
            input = input.cuda()

        start = time.time()
        output = model(input)
        prob = F.softmax(output, dim=1)
        q.put(f"Inference time 1 image : {str(round(time.time() - start , 5))}")

        for idx, p in enumerate(prob):
            # Fix: single topk call instead of two; unpack values and indices.
            values, indices = torch.topk(p, 2)
            values = values.tolist()    # top-2 probabilities
            indices = indices.tolist()  # top-2 class indices
            img = cv2.imread(path)
            cv2.imwrite(f"{save_path}/{classes[indices[0]]}/{classes[indices[0]]}={values[0]}__{classes[indices[1]]}={values[1]}.bmp", img)
153 | + | ||
154 | +# 유저가 지정해준 checkpoint가 없으면 config 에 있는 checkpoint를 사용. | ||
155 | +# data는 config에 지정된 data를 활용. | ||
def UI_validate(mode, q, **kwargs):
    """UI entry point for validation: patch the task config with the user's
    block count, data path and checkpoint, then run main().

    mode: task mode selecting the base config.
    q: queue receiving progress/error strings for the UI thread.
    kwargs: expects 'blocknum', 'data_path' and 'ck_path'.
    """
    try:
        _, _, args = get_savepath_classes_args(mode)
        args['model']['blocks'] = kwargs['blocknum']
        args['data']['val'] = kwargs["data_path"]

        q.put(f"using user's checkpoint {kwargs['ck_path']}")
        args['checkpoint'] = kwargs['ck_path']

        timestring = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        args['id'] = "validate_" + timestring
        main(args, q)
    except Exception as ex:
        q.put(f"실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.")
        # Fix: logger.exception records the full traceback; logger.info(ex)
        # only logged the exception message, discarding the stack trace.
        logger.exception(ex)
171 | + | ||
# Test always runs on user-supplied data; when none is given the UI passes
# its default demo data instead.
# mode: Error, ErrorType, All
# path: data path (file or directory)
# test_mode: 'dir' for a directory of images, anything else for one file
def UI_test(mode, path, test_mode, q, **kwargs):
    """Run inference from the UI on a directory of images or a single file.

    Args:
        mode: task name used to pick config/classes ('Error', 'ErrorType', 'All').
        path: image file or directory to test.
        test_mode: 'dir' => batched DataLoader path, else single-file path.
        q: queue used to push status strings back to the UI dialog.
        kwargs: must contain 'blocknum', 'size' and 'ck_path'.
    """
    try:
        _, _, args = get_savepath_classes_args(mode)
        # Output folders for the three result categories.
        make_error_dir()
        make_type_dir()
        make_all_dir()

        args['model']['blocks'] = kwargs['blocknum']
        args['train']['size'] = kwargs['size']

        q.put(f"using user's checkpoint {kwargs['ck_path']}")
        args['checkpoint'] = kwargs['ck_path']

        timestring = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        args['id'] = "test_" + timestring
        gpus = args['gpu']
        resize_size = args['train']['size']
        model = mobilenetv3(n_class=args['model']['class'], blocknum=args['model']['blocks'])
        # Wrap in DataParallel before loading the checkpoint — the saved
        # state_dict presumably carries 'module.'-prefixed keys (saved from
        # a DataParallel model), so the wrapped model's keys match.
        if torch.cuda.is_available():
            torch.cuda.set_device(gpus[0])
            with torch.cuda.device(gpus[0]):
                model = model.cuda()
                model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])
        else:
            model = torch.nn.DataParallel(model)
            device = torch.device("cpu")
            model.to(device)

        q.put("loading checkpoint...")
        if torch.cuda.is_available():
            checkpoint = torch.load(args['checkpoint'])
        else:
            checkpoint = torch.load(args['checkpoint'], map_location=torch.device('cpu'))

        model.load_state_dict(checkpoint['state_dict'])
        q.put("checkpoint already loaded!")
        q.put("start test")

        # Directory mode: use a DataLoader so many files can be batched.
        # MyImageFolder also yields each file's path (needed to save results).
        if test_mode == 'dir':
            normalize = transforms.Normalize(mean=[0.4015], std=[0.2165])
            transform_test = transforms.Compose([
                transforms.Resize((resize_size, resize_size)),
                transforms.Grayscale(),
                transforms.ToTensor(),
                normalize
            ])
            q.put(f"data path directory is {path}")
            testset = MyImageFolder(path, transform=transform_test)
            test_loader = FastDataLoader(testset, batch_size=args['predict']['batch-size'], shuffle=False, num_workers=8)
            start = time.time()
            test(test_loader, model, mode)
            q.put(f"Inference time {len(testset)} images : {str(round(time.time() - start , 5))}")

        # Single-file mode: no DataLoader, so apply the same preprocessing
        # steps directly with transforms.functional on the one image.
        else:
            image = Image.open(path)
            x = TF.resize(image, (resize_size, resize_size))  # resize
            x = TF.to_grayscale(x)                            # grayscale
            x = TF.to_tensor(x)                               # to tensor
            x.unsqueeze_(0)                                   # add batch dim
            start = time.time()
            single_file_test(x, model, path, mode, q)
            q.put(f"Inference time 1 image : {str(round(time.time() - start , 5))}")
        q.put('finish test')

    except Exception:
        # Notify the UI; log the full traceback (logger.info(ex) dropped it).
        q.put("실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.")
        logger.exception("UI_test failed")
252 | + | ||
def UI_temp(path, q, model):
    """Quick single-image inference with an already-loaded model.

    Args:
        path: image file to classify.
        q: queue used to push status strings back to the UI dialog.
        model: model instance ready for inference.
    """
    try:
        resize_size = 64
        # Same preprocessing as the single-file branch of UI_test.
        image = Image.open(path)
        x = TF.resize(image, (resize_size, resize_size))
        x = TF.to_grayscale(x)
        x = TF.to_tensor(x)
        x.unsqueeze_(0)  # add batch dimension

        single_file_test(x, model, path, "Error", q)

        q.put('temp test finish')
    except Exception:
        # Notify the UI; log the full traceback (logger.info(ex) dropped it).
        q.put("실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.")
        logger.exception("UI_temp failed")
268 | + | ||
def UI_temp2():
    """Ad-hoc benchmark: print the average per-batch loading time
    over one pass of the hard-coded dataset."""
    batch_size = 256

    train_transforms = transforms.Compose([
        transforms.Resize((256,256)),
        transforms.ToTensor()
    ])

    train_dataset = datasets.ImageFolder("../data/Fifth_data/All", train_transforms)

    train_loader = FastDataLoader(dataset=train_dataset,
                                  batch_size=batch_size, shuffle=True)
    batches = 0
    start = time.time()
    for _ in train_loader:  # iterate once through the loader; data itself unused
        batches += 1
    end = time.time()
    # Guard against an empty dataset (original divided by zero here).
    if batches:
        print((end - start) / batches)
    else:
        print("no batches loaded")
288 | + | ||
def main(args, q=None):
    """Entry point for one evaluation run.

    Sets up a per-run log file named after args['id'], runs the model,
    and reports completion (or failure) through printlog/q.
    """
    fileHandler = None
    try:
        logdir = f'logs/runs/{args["task"]}/'
        os.makedirs(logdir, exist_ok=True)

        # Dedicated per-run log file.
        fileHandler = logging.FileHandler(logdir + f'{args["id"]}.log')
        logger.addHandler(fileHandler)

        # 2. eval
        run_model(args, q)

        # 3. Done
        printlog(f"[{args['id']}] done", logger, q)

    except Exception:
        printlog("실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.", logger, q)
        # Keep the full traceback (logger.info(ex) dropped it).
        logger.exception("evaluation run failed")
    finally:
        # Detach the per-run handler so repeated UI runs don't duplicate
        # log lines or leak file descriptors (it was never removed before).
        if fileHandler is not None:
            logger.removeHandler(fileHandler)
            fileHandler.close()
308 | + | ||
def run_model(args, q=None):
    """Build the data pipeline and model from args, load the checkpoint,
    and evaluate it.

    Evaluates on a random 10% subset of args['data']['val'], drawn with the
    same fixed seed (10) used at training time, so the held-out split is
    reproducible.
    """
    resize_size = args['train']['size']

    gpus = args['gpu']

    # Dataset-wide mean/std computed from the validation data, used for
    # input normalization.
    mean, std = get_params(args['data']['val'], resize_size)

    normalize = transforms.Normalize(mean=[mean[0].item()],
                                     std=[std[0].item()])
    normalize_factor = [mean, std]  # passed to validate() for Grad-CAM de-normalization

    # data loader
    transform_test = transforms.Compose([
        transforms.Resize((resize_size,resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        normalize
    ])
    kwargs = {'num_workers': args['predict']['worker'], 'pin_memory': True}
    test_data = MyImageFolder(args['data']['val'], transform_test)

    # Take validation_ratio (10%) of the data as the evaluation subset.
    random_seed = 10
    validation_ratio = 0.1
    num_test = len(test_data)
    indices = list(range(num_test))
    split = int(np.floor(validation_ratio * num_test))

    # Fixed seed: the shuffled index order is the same on every run.
    np.random.seed(random_seed)
    np.random.shuffle(indices)

    valid_idx = indices[:split]
    valid_sampler = SubsetRandomSampler(valid_idx)


    val_loader = FastDataLoader(
        test_data, batch_size=args['predict']['batch-size'], sampler=valid_sampler,
        **kwargs)

    criterion = nn.CrossEntropyLoss()

    # load model
    model = mobilenetv3(n_class=args['model']['class'], blocknum=args['model']['blocks'])

    # get the number of model parameters
    logger.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    # Wrap in DataParallel before loading weights — the checkpoint is loaded
    # into the wrapped model below, so its keys presumably carry the
    # 'module.' prefix of a DataParallel save (TODO confirm).
    if torch.cuda.is_available():
        torch.cuda.set_device(gpus[0])
        with torch.cuda.device(gpus[0]):
            model = model.cuda()
            criterion = criterion.cuda()
            model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])
    else:
        model = torch.nn.DataParallel(model)
        device = torch.device("cpu")
        model.to(device)
        criterion.to(device)


    logger.info("=> loading checkpoint '{}'".format(args['checkpoint']))

    if torch.cuda.is_available():
        checkpoint = torch.load(args['checkpoint'])
    else:
        checkpoint = torch.load(args['checkpoint'], map_location=torch.device('cpu'))
    args['start_epoch'] = checkpoint['epoch']
    best_prec1 = checkpoint['best_prec1']
    model.load_state_dict(checkpoint['state_dict'])
    logger.info("=> loaded checkpoint '{}' (epoch {})"
                .format(args['checkpoint'], checkpoint['epoch']))

    cudnn.benchmark = True  # let cuDNN pick the fastest conv algorithms

    # define loss function (option 2)
    #criterion = FocalLoss(
    #    gamma=args['loss']['gamma'], alpha=args['loss']['alpha']).cuda()

    # evaluate on validation set
    prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)

    # remember best prec@1 and save checkpoint
    best_prec1 = max(prec1, best_prec1)
    logger.info(f'Best accuracy: {best_prec1}')
393 | + | ||
394 | + | ||
def validate(val_loader, model, criterion, normalize_factor, args, q):
    """Perform validation on the validation set.

    Returns the average top-1 precision over val_loader. Depending on the
    args['predict'] flags, also saves mis-classified images and generates
    Grad-CAM visualizations for them.
    """
    with torch.no_grad():
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()

        # switch to evaluate mode
        model.eval()

        end = time.time()
        for i, data in enumerate(val_loader):
            # MyImageFolder yields ((image, label), (path, _)) per batch.
            (input, target), (path, _) = data
            if torch.cuda.is_available():
                target = target.cuda()
                input = input.cuda()

            # compute output
            output = model(input)

            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1 = accuracy(output.data, target, topk=(1,))[0]

            # Save mis-classified images (save_correct=False: errors only).
            if args['predict']['save']:
                save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)

            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # NOTE(review): i % 1 == 0 is always true, so this logs every
            # batch — presumably a leftover print-frequency placeholder.
            if i % 1 == 0:
                logger.info('Test: [{0}/{1}]\t'
                            'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                            'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                            'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                                i, len(val_loader), batch_time=batch_time, loss=losses,
                                top1=top1))

        printlog(' * Prec@1 {top1.avg:.3f}'.format(top1=top1), logger, q)

        if args["predict"]["cam"]:
            logger.info("Creating CAM")

            # Grad-CAM over the saved error cases; pass the normalization
            # factors only when the config asks for normalized input.
            if args['predict']['normalize']:
                make_grad_cam(f"eval_results/{args['task']}/error_case",
                              f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'], args=args)
            else:
                make_grad_cam(f"eval_results/{args['task']}/error_case",
                              f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None, cam_class=args['predict']['cam-class'], args = args)

        return top1.avg
454 | + | ||
def save_error_case(output, target, path, args , topk=(1,), input=None, save_correct=False):
    """Save mis-classified (and optionally correctly classified) images.

    Copies the source image of each wrong prediction into
    eval_results/<task>/error_case, naming the file with a running index,
    the ground-truth class, and the top-2 predicted classes with their
    softmax confidences (in percent). With save_correct=True, correct
    predictions are saved under correct_case as well.
    """
    global error_case_idx, correct_case_idx

    # The counters are module-level so the file index keeps growing across
    # batches. The original reset them to 0 on every call, which made each
    # batch overwrite the files written by the previous one.
    if 'error_case_idx' not in globals():
        error_case_idx = 0
    if 'correct_case_idx' not in globals():
        correct_case_idx = 0

    _, class_arr, _ = get_savepath_classes_args(args['task'])

    p = F.softmax(output, dim=1)

    # Top-2 probabilities/classes for the filename; compute topk only once
    # (the original called torch.topk twice for the same result).
    top2 = torch.topk(p, 2)
    values = top2.values.tolist()
    indices = top2.indices.tolist()

    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)

    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    pred = pred.view(batch_size)
    correct = correct.view(batch_size)

    # Build the output directory tree (idempotent, creates parents too).
    os.makedirs(f"eval_results/{args['task']}/error_case", exist_ok=True)
    if save_correct:
        os.makedirs(f"eval_results/{args['task']}/correct_case", exist_ok=True)

    for idx, correct_element in enumerate(correct):
        # Wrong prediction: copy the original image with a descriptive name.
        if correct_element.item() == 0:
            img = cv2.imread(path[idx])
            cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[indices[idx][0]]}={round(values[idx][0]*100,1)}_{class_arr[indices[idx][1]]}={round(values[idx][1]*100,1)}_real.bmp" ,img)
            error_case_idx = error_case_idx + 1

        # Optionally save correct predictions too.
        if save_correct and correct_element.item() == 1:
            img = cv2.imread(path[idx])
            cv2.imwrite(f"eval_results/{args['task']}/correct_case/idx_{correct_case_idx}_label_{class_arr[target[idx]]}_real.bmp" ,img)
            correct_case_idx = correct_case_idx + 1
504 | + | ||
505 | + | ||
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # --config takes a task name: 'Error', 'Type', anything else => All.
    # BUG FIX: the old default was the file path "configs/Error_config.yml",
    # which never matched 'Error' below, so the default run silently loaded
    # the All config. Default to 'Error' so the Error config is actually used.
    parser.add_argument("--config", default="Error",
                        help="task config to use: Error, Type, or anything else for All")
    args = parser.parse_args()

    if args.config == 'Error':
        args = get_args_from_yaml("configs/Error_config.yml")
    elif args.config == 'Type':
        args = get_args_from_yaml('configs/ErrorType_config.yml')
    else:
        args = get_args_from_yaml('configs/All_config.yml')
    args['id'] = 'eval'

    main(args)
train.py
0 → 100644
1 | +import argparse | ||
2 | +import datetime | ||
3 | +import logging | ||
4 | +import os | ||
5 | +import shutil | ||
6 | +import time | ||
7 | + | ||
8 | +import numpy as np | ||
9 | +import PIL | ||
10 | +import torch | ||
11 | +import torch.backends.cudnn as cudnn | ||
12 | +import torch.nn as nn | ||
13 | +import torch.nn.parallel | ||
14 | +import torch.optim | ||
15 | +import adabound | ||
16 | +import torch.utils.data | ||
17 | +from torch.utils.data.sampler import SubsetRandomSampler | ||
18 | +import torchvision.models as models | ||
19 | +import torchvision.datasets as datasets | ||
20 | +import torchvision.transforms as transforms | ||
21 | +from torchvision.utils import save_image | ||
22 | +from PIL.ImageOps import grayscale | ||
23 | +from PIL import Image | ||
24 | +from get_mean_std import get_params | ||
25 | + | ||
26 | +import random | ||
27 | +from tensorboard_logger import configure, log_value | ||
28 | +from model import mobilenetv3, EfficientNet | ||
29 | +from focal_loss import FocalLoss | ||
30 | +from utils import get_args_from_yaml, accuracy, precision, recall, AverageMeter, printlog, FastDataLoader | ||
31 | +import threading | ||
32 | + | ||
# Module-level logger; per-run file handlers are attached in main().
logger = logging.getLogger('Techwing_log_file')
logger.setLevel(logging.INFO)

# Console logging (uncomment to also echo the log to the console).
#streamHandler = logging.StreamHandler()
#logger.addHandler(streamHandler)

# Best validation accuracy seen so far; updated in run_model and
# also written to TensorBoard.
best_prec1 = 0
print_dataset_statestics = False  # (sic) unused toggle for dataset statistics

# Train/validation split configuration: fraction of the data held out,
# and the fixed seed that makes the split reproducible across runs.
validation_ratio = 0.1
random_seed = 10

# Seed for model weight initialization, randomized once per process.
seed = random.randint(1,1000)
# Training entry point invoked from the UI.
def UI_train(mode, q, **kwargs):
    """Configure and launch a training run from UI-supplied settings.

    Args:
        mode: task name ('Error', 'Type', anything else selects 'All').
        q: queue used to push status strings back to the UI dialog.
        kwargs: blocknum, data_path, epoch, optim, lr, batch_size, size,
            resume (bool) and ck_path (checkpoint to resume from).
    """
    try:
        # Pick the base config for the requested task.
        if mode == 'Error':
            args = get_args_from_yaml("configs/Error_config.yml")
        elif mode == 'Type':
            args = get_args_from_yaml('configs/ErrorType_config.yml')
        else:
            args = get_args_from_yaml('configs/All_config.yml')

        # Override config values with the user's UI choices.
        args['model']['blocks'] = kwargs['blocknum']
        args['data']['train'] = kwargs["data_path"]
        args['train']['epochs'] = kwargs["epoch"]
        args['optimizer']['type'] = kwargs["optim"]
        args['optimizer']['lr'] = kwargs["lr"]
        args['train']["batch-size"] = kwargs["batch_size"]
        args['predict']['batch-size'] = kwargs["batch_size"]
        args['train']['size'] = kwargs["size"]

        if kwargs['resume']:
            q.put(f"resume training with checkpoint : {kwargs['ck_path']}")
            args['train']['resume'] = kwargs['ck_path']

        # Unique run id so every training run gets its own log file.
        timestring = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        args['id'] = "train_" + timestring
        main(args, q=q)
    except Exception:
        # Notify the UI; log the full traceback (logger.info(ex) dropped it).
        q.put("실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요")
        logger.exception("UI_train failed")
81 | + | ||
# args: user hyperparameters, q: Queue of UI
def main(args, q=None):
    """Entry point for one training run.

    Attaches per-run log handlers (console for CLI runs, plus a run-specific
    log file), runs train/validate, and detaches the handlers afterwards.
    """
    run_handlers = []
    try:
        logdir = f"logs/{args['task']}/"
        os.makedirs(logdir, exist_ok=True)

        # CLI runs (no UI queue) also echo the log to the console.
        if q is None:
            streamHandler = logging.StreamHandler()
            logger.addHandler(streamHandler)
            run_handlers.append(streamHandler)

        # Dedicated per-run log file.
        fileHandler = logging.FileHandler(logdir + f"{args['id']}_{args['modelname']}_block_{args['model']['blocks']}.log")
        logger.addHandler(fileHandler)
        run_handlers.append(fileHandler)

        logger.info(time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())))

        # Train & Validate
        run_model(args, q)

        logger.info(time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time())))
        # BUG FIX: the original called logging.info (the root logger) here,
        # so the completion message bypassed this module's handlers.
        logger.info(f"[{args['id']}] done")

    except Exception:
        logger.error("Fatal error in main loop", exc_info=True)
        # warning() replaces the deprecated warn() alias.
        logger.warning(f"[{args['id']}] failed")
    finally:
        # Detach per-run handlers so repeated runs don't duplicate log
        # lines or leak file descriptors (they were never removed before).
        for handler in run_handlers:
            logger.removeHandler(handler)
            handler.close()
111 | + | ||
# args: config values loaded from YAML; q: queue for writing to the UI dialog.
def run_model(args, q=None):
    """Train and validate for args['train']['epochs'] epochs.

    Builds the data pipeline (with optional augmentation), the MobileNetV3
    model, the optimizer/scheduler, optionally resumes from a checkpoint,
    runs the train/validate loop, and returns the path of the best
    checkpoint file written by save_checkpoint.
    """
    global best_prec1

    # Experiment tag (task=All..., id=run id, model=mobilenetv3,
    # epoch=1~3000, block=1~11, class=2~8).
    args['task'] = "%s/%s_model=%s-ep=%s-block=%s-class=%s" % (
        args['task'],
        args['id'],
        args['modelname'],
        args['train']['epochs'],
        args['model']['blocks'],
        args['model']['class'])

    logger.info(f"use seed {seed}")
    logger.info(f"use dataset : {args['data']['train']}")

    # get GPU information from configs file
    logger.info(args)
    gpus = args['gpu']
    resize_size = args['train']['size']

    ############################# data loading #############################
    # Per-dataset mean/std for input normalization.
    mean, std = get_params(args['data']['train'], resize_size)
    normalize = transforms.Normalize(mean=[mean[0].item()],
                                     std=[std[0].item()])

    # Transforms applied (in order) by the train loader.
    if args['train']['augment']:
        transform_train = transforms.Compose([
            transforms.Resize((resize_size, resize_size)),  # resize H x W
            transforms.ColorJitter(0.2,0.2,0.2),            # jitter brightness/contrast/saturation
            transforms.RandomRotation(2),                   # rotate within -2..2 degrees
            transforms.RandomAffine(5),                     # small random affine distortion
            transforms.RandomCrop(resize_size, padding=2),  # pad by 2, crop back to size
            transforms.RandomHorizontalFlip(),              # random left-right flip
            transforms.Grayscale(),
            transforms.ToTensor(),
            normalize
        ])
    # Option: no augmentation.
    else:
        transform_train = transforms.Compose([
            transforms.Resize((resize_size, resize_size)),
            transforms.ToTensor(),
            normalize
        ])

    # Transforms applied by the validation loader.
    transform_test = transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        normalize
    ])

    # num_workers: loader worker count; pin_memory speeds host->GPU copies.
    kwargs = {'num_workers': args['train']['worker'], 'pin_memory': True}

    # ImageFolder: each sub-directory of the configured path becomes a class.
    train_data = datasets.ImageFolder(args['data']['train'], transform_train)
    val_data = datasets.ImageFolder(args['data']['train'],transform_test)

    # The whole dataset is configured as training data, so hold out
    # validation_ratio (10%) of it as the validation set.
    random_seed = 10
    validation_ratio = 0.1
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(validation_ratio * num_train))

    # Fixed seed (10 for both train and eval) => identical split everywhere.
    np.random.seed(random_seed)
    np.random.shuffle(indices)

    # Split into train / validation index sets.
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)
    train_loader = FastDataLoader(
        train_data, batch_size=args['train']['batch-size'], sampler=train_sampler,
        **kwargs)
    val_loader = FastDataLoader(
        val_data, batch_size=args['train']['batch-size'], sampler=valid_sampler,
        **kwargs)

    ############################## model setup ##############################
    # Seed for the random initialization of the convolution weights.
    torch.manual_seed(seed)

    # Per-class weights so classes can be learned with different emphasis.
    class_weights = torch.FloatTensor(args['train']['weight'])

    # Loss: cross entropy.
    criterion = nn.CrossEntropyLoss(weight=class_weights)

    # Model: MobileNetV3.
    model = mobilenetv3(n_class=args['model']['class'], blocknum=args['model']['blocks'], dropout=0.5)

    # SGD by default (a well-tuned SGD often beats Adam) with cosine LR decay.
    if args['optimizer']['type'] == "SGD":
        optimizer = torch.optim.SGD(model.parameters(), args['optimizer']['lr'],
                                    momentum=args['optimizer']['momentum'],
                                    nesterov=True,
                                    weight_decay=args['optimizer']['weight_decay'])
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader))

    else:
        optimizer = torch.optim.Adam(model.parameters(), args['optimizer']['lr'],
                                     weight_decay=args['optimizer']['weight_decay'])
        scheduler = None  # Adam adapts its own step sizes; no scheduler needed.

    # get the number of model parameters
    logger.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    # Multi-GPU setup (use CUDA_VISIBLE_DEVICES=0,1 to select GPUs).
    # DataParallel gathers gradients on output_device; the data here is
    # small, so everything is collected on gpus[0].
    if torch.cuda.is_available():
        torch.cuda.set_device(gpus[0])
        with torch.cuda.device(gpus[0]):
            model = model.cuda()
            criterion = criterion.cuda()
            model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])
    else:
        model = torch.nn.DataParallel(model)
        device = torch.device("cpu")
        model.to(device)
        criterion.to(device)

    # Resume training from a user-supplied checkpoint, if requested.
    if args['train']['resume']:
        # Only if the checkpoint file actually exists.
        if os.path.isfile(args['train']['resume']):
            logger.info(f"=> loading checkpoint '{args['train']['resume']}'")

            if torch.cuda.is_available():
                checkpoint = torch.load(args['train']['resume'])
            else:
                checkpoint = torch.load(args['train']['resume'], map_location=torch.device('cpu'))

            # Continue from the checkpoint's epoch ...
            args['train']['start-epoch'] = checkpoint['epoch']

            # ... and its best accuracy so far.
            best_prec1 = checkpoint['best_prec1']

            # Load the saved weights into the model declared above.
            model.load_state_dict(checkpoint['state_dict'])
            logger.info(f"=> loaded checkpoint '{args['train']['resume']}' (epoch {checkpoint['epoch']})")
        else:
            # BUG FIX: args is a dict, so the original args.resume raised
            # AttributeError instead of logging the missing path.
            logger.info(f"=> no checkpoint found at '{args['train']['resume']}'")

    # Let cuDNN benchmark and pick the fastest conv algorithms for the
    # fixed tensor shapes / GPU memory at hand.
    cudnn.benchmark = True

    for epoch in range(args['train']['start-epoch'], args['train']['epochs']):
        # train for one epoch
        train(train_loader, model, criterion, optimizer, scheduler, epoch, args, q)

        # evaluate on validation set
        prec1, prec, rec = validate(val_loader, model, criterion, epoch, args, q)

        # remember best prec@1 and save checkpoint
        is_best = prec1 >= best_prec1

        best_prec1 = max(prec1, best_prec1)
        checkpoint = save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best, args, args['model']['blocks'], q)

        printlog(f'Best accuracy: {best_prec1}', logger, q)

        # Per-class precision/recall (multi-class returns lists of meters;
        # the binary task returns single meters).
        if args['model']['class'] !=2:
            for i in range(len(prec)):
                logger.info(' * Precision {prec.avg:.3f}'.format(prec=prec[i]))
                logger.info(' * recall {rec.avg:.3f}'.format(rec=rec[i]))
        else:
            logger.info(' * Precision {prec.avg:.3f}'.format(prec=prec))
            logger.info(' * recall {rec.avg:.3f}'.format(rec=rec))

    return checkpoint
303 | + | ||
# train_loader: data loader for the training split
# model: MobileNetV3 (default)
# criterion: cross entropy (default)
# scheduler: CosineAnnealing (None when using Adam)
# epoch: current epoch index
# args: config parameters (see the YAML config files)
# q: queue a UI thread drains to display progress text
def train(train_loader, model, criterion, optimizer, scheduler, epoch, args, q=None):
    """Train for one epoch on the training set"""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    prec = AverageMeter()

    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        if torch.cuda.is_available():
            target = target.cuda()
            input = input.cuda()
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        prec1 = accuracy(output, target, topk=(1,))[0]

        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # BUG FIX: the prec meter was logged below but never updated, so
        # the binary task always printed "Precision 0.000"; track it the
        # same way validate() does.
        if args['model']['class'] == 2:
            prec.update(precision(output.data, target, target_class=0), input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if scheduler is not None:
            scheduler.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args['etc']['print_freq'] == 0:
            # Binary (Error/Normal) task: include precision in the log line.
            if args['model']['class'] == 2:
                logger.info('Epoch: [{0}][{1}/{2}]\t'
                            'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                            'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                            'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                            'Precision {prec.val:.3f} ({prec.avg:.3f})'
                            .format(
                                epoch, i, len(train_loader), batch_time=batch_time,
                                loss=losses, top1=top1, prec=prec))
            # All / ErrorType task: per-class precision is too wide for one
            # line, so it is reported at the end of validation instead.
            else:
                logger.info('Epoch: [{0}][{1}/{2}]\t'
                            'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                            'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                            'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                            .format(
                                epoch, i, len(train_loader), batch_time=batch_time,
                                loss=losses, top1=top1))

    # log to TensorBoard
    if args['etc']['tensorboard']:
        log_value('train_loss', losses.avg, epoch)
        log_value('train_acc', top1.avg, epoch)
def validate(val_loader, model, criterion, epoch, args, q=None):
    """Perform validation on the validation set.

    Returns (top-1 average accuracy, precision meter(s), recall meter(s)).
    For the binary task precision/recall are single AverageMeters;
    otherwise they are lists with one meter per class.
    (Original docstring was garbled by a stray paste:
    "Perform validaadd_model_to_queuetion".)
    """
    with torch.no_grad():
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        prec = []
        rec = []
        # Binary task: a single precision/recall meter for class 0;
        # multi-class: one meter per class.
        if args['model']['class'] == 2:
            prec = AverageMeter()
            rec = AverageMeter()
        else:
            for i in range(args['model']['class']):
                prec.append(AverageMeter())
                rec.append(AverageMeter())
        # switch to evaluate mode
        model.eval()
        end = time.time()

        for i, (input, target) in enumerate(val_loader):
            if torch.cuda.is_available():
                target = target.cuda()
                input = input.cuda()

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1 = accuracy(output.data, target, topk=(1,))[0]

            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            if args['model']['class'] == 2:
                prec.update(precision(output.data, target, target_class=0), input.size(0))
                rec.update(recall(output.data, target, target_class=0), input.size(0))
            else:
                for k in range(args['model']['class']):
                    prec[k].update(precision(output.data, target, target_class=k), input.size(0))
                    rec[k].update(recall(output.data, target, target_class=k), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args['etc']['print_freq'] == 0:
                if args['model']['class'] == 2:
                    logger.info('Test: [{0}/{1}]\t'
                                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                                'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                                'Precision {prec.val:.3f} ({prec.avg:.3f})'
                                .format(
                                    i, len(val_loader), batch_time=batch_time, loss=losses,
                                    top1=top1, prec=prec))
                else:
                    logger.info('Test: [{0}/{1}]\t'
                                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                                'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'
                                .format(
                                    i, len(val_loader), batch_time=batch_time, loss=losses,
                                    top1=top1))

        printlog(' * epoch: {epoch} Prec@1 {top1.avg:.3f}'.format(epoch=epoch,top1=top1), logger, q)

        if args['model']['class'] == 2:
            logger.info(' * Precision {prec.avg:.3f}'.format(prec=prec))
            logger.info(' * recall {rec.avg:.3f}'.format(rec=rec))

        # log to TensorBoard
        if args['etc']['tensorboard']:
            log_value('val_loss', losses.avg, epoch)
            log_value('val_acc', top1.avg, epoch)
        return top1.avg, prec, rec
447 | + | ||
448 | + | ||
def save_checkpoint(state, is_best, args, block, q, filename='checkpoint.pth.tar'):
    """Saves checkpoint to disk.

    Args:
        state: serializable training state (model/optimizer state dicts, epoch, ...).
        is_best: when True, the checkpoint is also copied to model_best.pth.tar.
        args: config dict; args['output'] and args['task'] form the target directory.
        block: unused here; kept for signature compatibility with callers.
        q: unused here; kept for signature compatibility with callers.
        filename: basename of the checkpoint file.

    Returns:
        Path of the best-model file (whether or not it was refreshed this call).
    """
    directory = "%s/%s/" % (args['output'], args['task'])
    if not os.path.exists(directory):
        os.makedirs(directory)
    filename = directory + filename
    torch.save(state, filename)
    # BUGFIX: the f-string carried no placeholder, so the saved path was never logged.
    logger.info(f"Checkpoint Saved: {filename}")
    best_filename = f"{args['output']}/{args['task']}/model_best.pth.tar"
    if is_best:
        shutil.copyfile(filename, best_filename)
        logger.info(f"New Best Checkpoint saved: {best_filename}")

    return best_filename
463 | + | ||
464 | + | ||
465 | + | ||
def save_error_case(output, target, epoch, topk=(1,), input=None):
    """Dump misclassified input images to disk for later inspection.

    Args:
        output: (N, C) model scores.
        target: (N,) ground-truth labels.
        epoch: current epoch number, embedded in the output filename.
        topk: ranks considered; the flattening below assumes top-1 only.
        input: (N, ...) batch of input images to save.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    # NOTE: view(batch_size) only succeeds when maxk == 1 — TODO confirm callers.
    pred = pred.view(batch_size)
    correct = correct.view(batch_size)

    for idx, correct_element in enumerate(correct):
        # BUGFIX: persist only the *error* cases; the original never checked
        # correct_element and therefore saved every sample in the batch.
        if not correct_element:
            image = input[idx]
            save_image(image, f"error_case/epoch_{epoch}_idx_{idx}_case_{pred[idx]}_{target[idx]}.bmp")
478 | + | ||
479 | + | ||
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", required=True, help="train config file")  # selects which YAML config to load
    cli_args = parser.parse_args()

    # Map the task name given on the command line onto its YAML config file;
    # any other value falls back to the combined "All" configuration.
    config_files = {
        'Error': "configs/Error_config.yml",
        'ErrorType': 'configs/ErrorType_config.yml',
    }
    args = get_args_from_yaml(config_files.get(cli_args.config, 'configs/All_config.yml'))

    # job id
    args['id'] = str(random.randint(0, 99999))
    main(args)
... | \ No newline at end of file | ... | \ No newline at end of file |
utils.py
0 → 100644
1 | +import yaml | ||
2 | +import torchvision.datasets as datasets | ||
3 | +import re | ||
4 | +import math | ||
5 | +import collections | ||
6 | +from functools import partial | ||
7 | +import torch | ||
8 | +from torch import nn | ||
9 | +from torch.nn import functional as F | ||
10 | +from torch.utils import model_zoo | ||
11 | + | ||
class AverageMeter(object):
    """Tracks the most recent value plus a running (weighted) mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0    # last value seen
        self.avg = 0    # running mean over everything recorded so far
        self.sum = 0    # weighted sum of recorded values
        self.count = 0  # total weight (e.g. number of samples)

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
29 | + | ||
30 | +# 기존 ImageFolder는 For문에 들어갈 때 튜플 (input, label)을 만들지만 | ||
31 | +# 해당 클래스는 (input, label, path) 까지 만들어 내도록 구성. | ||
class MyImageFolder(datasets.ImageFolder):
    """ImageFolder variant that also exposes each sample's file path.

    Note the nesting: ``__getitem__`` returns
    ``((image, label), (path, class_index))`` — the stock ImageFolder pair
    plus the matching ``self.imgs`` entry — not a flat (image, label, path).
    """
    def __getitem__(self, index):
        # self.imgs[index] is the (file_path, class_index) pair for this sample.
        return super(MyImageFolder, self).__getitem__(index), self.imgs[index]
36 | + | ||
37 | +# instead of BatchSampler | ||
38 | +class _RepeatSampler(object): | ||
39 | + """ Sampler that repeats forever. | ||
40 | + | ||
41 | + Args: | ||
42 | + sampler (Sampler) | ||
43 | + """ | ||
44 | + | ||
45 | + def __init__(self, sampler): | ||
46 | + self.sampler = sampler | ||
47 | + | ||
48 | + def __iter__(self): | ||
49 | + while True: | ||
50 | + yield from iter(self.sampler) | ||
51 | + | ||
52 | + | ||
class FastDataLoader(torch.utils.data.dataloader.DataLoader):
    """DataLoader that keeps one persistent iterator across epochs.

    The stock DataLoader builds a fresh iterator (and, with workers, respawns
    worker processes) for every epoch; wrapping the batch sampler in
    _RepeatSampler makes the single underlying iterator inexhaustible, so it
    is created once in __init__ and reused by every subsequent __iter__.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # object.__setattr__ bypasses DataLoader's __setattr__ guard, which
        # forbids reassigning batch_sampler after initialization; wrap the
        # existing batch sampler so it repeats forever.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        # Single persistent iterator (single- or multi-process depending on
        # num_workers), built once and shared by all epochs.
        self.iterator = super().__iter__()

    def __len__(self):
        # One epoch = the number of batches the wrapped batch sampler yields.
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Draw exactly one epoch's worth of batches from the shared iterator.
        for i in range(len(self)):
            yield next(self.iterator)
66 | + | ||
67 | + | ||
68 | +# yml 파일 안에 적혀진 정보 받기. | ||
def get_args_from_yaml(file):
    """Load a YAML config file and return its parsed contents.

    Args:
        file: path to the YAML file.

    Returns:
        The parsed document (typically a dict of config sections).
    """
    with open(file) as f:
        # safe_load refuses arbitrary Python object tags and avoids the
        # PyYAML "load() without Loader" deprecation warning.
        conf = yaml.safe_load(f)
    return conf
73 | + | ||
74 | +# output : model output | ||
75 | +# target : input's label | ||
76 | +# | ||
def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (in percent) of ``output`` against ``target``.

    Args:
        output: (N, C) score tensor.
        target: (N,) label tensor.
        topk: iterable of k values to evaluate.

    Returns:
        List of 0-d tensors, one accuracy percentage per requested k.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # (maxk, N) after transpose: row k holds every sample's k-th ranked class.
    _, pred = output.topk(maxk, 1, True, True)
    hits = pred.t().eq(target.view(1, -1).expand(maxk, batch_size))

    # For each k, count samples whose label appears in the first k rows.
    return [hits[:k].reshape(-1).float().sum(0).mul_(100.0 / batch_size)
            for k in topk]
93 | + | ||
94 | +#에러를 기본 Class로 설정 | ||
def precision(output, target, e=1e-3, target_class=0):
    """Precision (%) of the top-1 predictions for one class (the Error class by default).

    Args:
        output: (N, C) score/logit tensor.
        target: (N,) ground-truth label tensor.
        e: smoothing constant kept in the denominator (matches the original
           formula, so results sit slightly below the exact ratio).
        target_class: index of the class treated as "positive".

    Returns:
        float: precision in percent; 100. when nothing was predicted as
        ``target_class`` (no positive predictions means no false alarms).
    """
    _, pred = output.topk(1, 1, True, True)
    pred = pred.view(-1)
    target = target.view(-1)

    # Vectorized counts. The original per-element Python loop was extremely
    # slow on GPU tensors and crashed for batch size 1 (len() of a 0-d
    # tensor after squeeze()).
    true_positives = int(((pred == target) & (target == target_class)).sum())
    false_positives = int(((pred == target_class) & (target != target_class)).sum())

    if true_positives + false_positives == 0:
        return 100.

    return (true_positives / (true_positives + false_positives + e)) * 100.
114 | + | ||
115 | +#에러를 기본 class로 설정 | ||
def recall(output, target, e=1e-3, target_class=0):
    """Recall (%) of the top-1 predictions for one class (the Error class by default).

    Args:
        output: (N, C) score/logit tensor.
        target: (N,) ground-truth label tensor.
        e: smoothing constant kept in the denominator (matches the original
           formula, so results sit slightly below the exact ratio).
        target_class: index of the class treated as "positive".

    Returns:
        float: recall in percent; 100. when the batch contains no sample of
        ``target_class`` (nothing to miss).
    """
    _, pred = output.topk(1, 1, True, True)
    pred = pred.view(-1)
    target = target.view(-1)

    # Vectorized counts. The original per-element Python loop was extremely
    # slow on GPU tensors and crashed for batch size 1 (len() of a 0-d
    # tensor after squeeze()).
    true_positives = int(((pred == target) & (target == target_class)).sum())
    false_negatives = int(((pred != target_class) & (target == target_class)).sum())

    if true_positives + false_negatives == 0:
        return 100.
    return (true_positives / (true_positives + false_negatives + e)) * 100.
131 | + | ||
def printlog(string, logger, q):
    """Log ``string`` and, when a queue is provided, also forward it (to the UI)."""
    if q is not None:
        q.put(string)
    logger.info(string)
136 | + | ||
137 | + | ||
138 | +######################################################################## | ||
139 | +############### HELPERS FUNCTIONS FOR MODEL ARCHITECTURE ############### | ||
140 | +######################################################################## | ||
141 | + | ||
142 | + | ||
# Parameters shared by the entire model (stem, all blocks, and head).
GlobalParams = collections.namedtuple('GlobalParams', [
    'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate',
    'num_classes', 'width_coefficient', 'depth_coefficient',
    'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size'])

# Parameters for an individual model block.
BlockArgs = collections.namedtuple('BlockArgs', [
    'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
    'expand_ratio', 'id_skip', 'stride', 'se_ratio'])

# Default every field of both tuples to None so callers may construct them
# with any subset of keyword arguments.
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
157 | + | ||
158 | + | ||
class SwishImplementation(torch.autograd.Function):
    """Memory-efficient Swish: stores only the input and recomputes the
    sigmoid in backward, instead of keeping the whole activation graph."""

    @staticmethod
    def forward(ctx, i):
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # BUGFIX: ctx.saved_variables was removed in modern PyTorch;
        # saved_tensors is the supported accessor for save_for_backward().
        i = ctx.saved_tensors[0]
        sigmoid_i = torch.sigmoid(i)
        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
171 | + | ||
172 | + | ||
class MemoryEfficientSwish(nn.Module):
    """Swish activation backed by the custom SwishImplementation autograd
    Function, trading a little recompute in backward for lower memory use."""
    def forward(self, x):
        return SwishImplementation.apply(x)
176 | + | ||
class Swish(nn.Module):
    """Plain Swish activation: ``x * sigmoid(x)``."""
    def forward(self, x):
        gate = torch.sigmoid(x)
        return gate * x
180 | + | ||
181 | + | ||
def round_filters(filters, global_params):
    """Scale ``filters`` by the width multiplier and round to a multiple of
    the depth divisor, never shrinking the result by more than 10%."""
    multiplier = global_params.width_coefficient
    if not multiplier:
        return filters

    divisor = global_params.depth_divisor
    scaled = filters * multiplier
    floor = global_params.min_depth or divisor

    # Round to the nearest multiple of `divisor`, but never below `floor`.
    rounded = max(floor, int(scaled + divisor / 2) // divisor * divisor)
    # Guarantee rounding never removes more than 10% of the scaled width.
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
195 | + | ||
196 | + | ||
def round_repeats(repeats, global_params):
    """Scale a block's repeat count by the depth multiplier, rounding up."""
    multiplier = global_params.depth_coefficient
    return int(math.ceil(multiplier * repeats)) if multiplier else repeats
203 | + | ||
204 | + | ||
def drop_connect(inputs, p, training):
    """Drop whole samples with probability ``p`` and rescale the survivors
    (drop connect / stochastic depth); no-op when not training."""
    if not training:
        return inputs

    keep_prob = 1 - p
    batch_size = inputs.shape[0]
    # floor(keep_prob + U[0,1)) is a Bernoulli(keep_prob) draw per sample.
    noise = torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    binary_mask = torch.floor(noise + keep_prob)
    return inputs / keep_prob * binary_mask
215 | + | ||
216 | + | ||
def get_same_padding_conv2d(image_size=None):
    """Pick the dynamically padded conv when the input size is unknown, or a
    statically padded one — required for ONNX export — when it is given."""
    if image_size is None:
        return Conv2dDynamicSamePadding
    return partial(Conv2dStaticSamePadding, image_size=image_size)
224 | + | ||
225 | + | ||
class Conv2dDynamicSamePadding(nn.Conv2d):
    """TensorFlow-style 'SAME' 2D convolution for inputs whose spatial size
    is not known ahead of time: padding is computed per forward pass."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        # Normalize stride to a (sh, sw) pair in case a 1-tuple was stored.
        if len(self.stride) != 2:
            self.stride = [self.stride[0]] * 2

    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        out_h, out_w = math.ceil(in_h / s_h), math.ceil(in_w / s_w)
        # Total padding needed so the output keeps ceil(input/stride) size.
        extra_h = max((out_h - 1) * s_h + (k_h - 1) * self.dilation[0] + 1 - in_h, 0)
        extra_w = max((out_w - 1) * s_w + (k_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if extra_h > 0 or extra_w > 0:
            # Split asymmetric padding the TensorFlow way: extra pixel on the
            # bottom/right side.
            x = F.pad(x, [extra_w // 2, extra_w - extra_w // 2,
                          extra_h // 2, extra_h - extra_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
243 | + | ||
244 | + | ||
class Conv2dStaticSamePadding(nn.Conv2d):
    """TensorFlow-style 'SAME' 2D convolution for a fixed, known input size:
    padding is precomputed once in __init__ (needed for ONNX export)."""

    def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, **kwargs)
        # Normalize stride to a (sh, sw) pair in case a 1-tuple was stored.
        if len(self.stride) != 2:
            self.stride = [self.stride[0]] * 2

        # Padding depends on the (fixed) input size, so it must be provided.
        assert image_size is not None
        if type(image_size) == list:
            in_h, in_w = image_size
        else:
            in_h = in_w = image_size
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        out_h, out_w = math.ceil(in_h / s_h), math.ceil(in_w / s_w)
        extra_h = max((out_h - 1) * s_h + (k_h - 1) * self.dilation[0] + 1 - in_h, 0)
        extra_w = max((out_w - 1) * s_w + (k_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if extra_h > 0 or extra_w > 0:
            # Asymmetric split the TensorFlow way: extra pixel bottom/right.
            self.static_padding = nn.ZeroPad2d(
                (extra_w // 2, extra_w - extra_w // 2, extra_h // 2, extra_h - extra_h // 2))
        else:
            self.static_padding = Identity()

    def forward(self, x):
        padded = self.static_padding(x)
        return F.conv2d(padded, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
269 | + | ||
270 | + | ||
class Identity(nn.Module):
    """No-op module: returns its input unchanged (used where a placeholder
    layer is structurally required, e.g. zero static padding)."""

    def __init__(self, ):
        super(Identity, self).__init__()

    def forward(self, input):
        return input
277 | + | ||
278 | + | ||
279 | + | ||
280 | +######################################################################## | ||
281 | +############## HELPERS FUNCTIONS FOR LOADING MODEL PARAMS ############## | ||
282 | +######################################################################## | ||
283 | + | ||
284 | + | ||
def efficientnet_params(model_name):
    """Return the scaling coefficients for a named EfficientNet variant.

    Returns:
        (width_coefficient, depth_coefficient, resolution, dropout_rate)
    """
    # Coefficients: width,depth,res,dropout
    coefficients = {
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        'efficientnet-b8': (2.2, 3.6, 672, 0.5),
        'efficientnet-l2': (4.3, 5.3, 800, 0.5),
    }
    return coefficients[model_name]
301 | + | ||
302 | + | ||
class BlockDecoder(object):
    """Translates between the compact block-string notation from the official
    TensorFlow EfficientNet repository and BlockArgs namedtuples."""

    @staticmethod
    def _decode_block_string(block_string):
        """Gets a block through a string notation of arguments.

        Example: 'r2_k3_s22_e6_i16_o24_se0.25' -> BlockArgs(num_repeat=2, ...)
        """
        assert isinstance(block_string, str)

        ops = block_string.split('_')
        options = {}
        for op in ops:
            # Split e.g. 'k3' into ('k', '3'); the regex keeps the numeric tail.
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value

        # Check stride: a single digit, or two identical digits.
        assert (('s' in options and len(options['s']) == 1) or
                (len(options['s']) == 2 and options['s'][0] == options['s'][1]))

        return BlockArgs(
            kernel_size=int(options['k']),
            num_repeat=int(options['r']),
            input_filters=int(options['i']),
            output_filters=int(options['o']),
            expand_ratio=int(options['e']),
            id_skip=('noskip' not in block_string),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=[int(options['s'][0])])

    @staticmethod
    def _encode_block_string(block):
        """Encodes a block to a string (inverse of _decode_block_string)."""
        # BUGFIX: the namedtuple field is `stride` (the original read the
        # non-existent `strides`), and decode() stores a 1-element list, so
        # the second digit must fall back to the first.
        stride = block.stride
        s_first = stride[0]
        s_second = stride[1] if len(stride) > 1 else stride[0]
        args = [
            'r%d' % block.num_repeat,
            'k%d' % block.kernel_size,
            's%d%d' % (s_first, s_second),
            'e%s' % block.expand_ratio,
            'i%d' % block.input_filters,
            'o%d' % block.output_filters
        ]
        if 0 < block.se_ratio <= 1:
            args.append('se%s' % block.se_ratio)
        if block.id_skip is False:
            args.append('noskip')
        return '_'.join(args)

    @staticmethod
    def decode(string_list):
        """
        Decodes a list of string notations to specify blocks inside the network.
        :param string_list: a list of strings, each string is a notation of block
        :return: a list of BlockArgs namedtuples of block args
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(BlockDecoder._decode_block_string(block_string))
        return blocks_args

    @staticmethod
    def encode(blocks_args):
        """
        Encodes a list of BlockArgs to a list of strings.
        :param blocks_args: a list of BlockArgs namedtuples of block args
        :return: a list of strings, each string is a notation of block
        """
        block_strings = []
        for block in blocks_args:
            block_strings.append(BlockDecoder._encode_block_string(block))
        return block_strings
374 | + | ||
375 | + | ||
def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2,
                 drop_connect_rate=0.2, image_size=None, num_classes=2):
    """Create an EfficientNet description: decoded block args + global params."""

    # Baseline (B0) architecture in the compact block-string notation.
    block_strings = [
        'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
        'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
        'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
        'r1_k3_s11_e6_i192_o320_se0.25',
    ]
    blocks_args = BlockDecoder.decode(block_strings)

    global_params = GlobalParams(
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        dropout_rate=dropout_rate,
        drop_connect_rate=drop_connect_rate,
        # data_format='channels_last', # removed, this is always true in PyTorch
        num_classes=num_classes,
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        depth_divisor=8,
        min_depth=None,
        image_size=image_size,
    )

    return blocks_args, global_params
403 | + | ||
404 | + | ||
def get_model_params(model_name, override_params):
    """Get the block args and global params for a given model name."""
    if not model_name.startswith('efficientnet'):
        raise NotImplementedError('model name is not pre-defined: %s' % model_name)

    width, depth, size, dropout = efficientnet_params(model_name)
    # note: all models have drop connect rate = 0.2
    blocks_args, global_params = efficientnet(
        width_coefficient=width, depth_coefficient=depth,
        dropout_rate=dropout, image_size=size)

    if override_params:
        # _replace raises ValueError for keys that are not GlobalParams fields.
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
418 | + | ||
419 | + | ||
# Pretrained ImageNet checkpoints (standard training), keyed by model name.
url_map = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth',
}


# Pretrained checkpoints trained with AdvProp (adversarial examples),
# selected via load_pretrained_weights(..., advprop=True).
url_map_advprop = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b0-b64d5a18.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b1-0f3ce85a.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b2-6e9d97e5.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b3-cdd7c0f4.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b4-44fb3a87.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b5-86493f6b.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b6-ac80338e.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b7-4652b6dd.pth',
    'efficientnet-b8': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b8-22a8fe65.pth',
}
443 | + | ||
444 | + | ||
def load_pretrained_weights(model, model_name, load_fc=True, advprop=False):
    """ Loads pretrained weights, and downloads if loading for the first time. """
    # AutoAugment or Advprop (different preprocessing)
    url_map_ = url_map_advprop if advprop else url_map
    state_dict = model_zoo.load_url(url_map_[model_name])
    if load_fc:
        # Strict load: the model must match the checkpoint exactly.
        model.load_state_dict(state_dict)
    else:
        # Drop the final classifier so the backbone can be reused with a
        # different number of output classes; only those two keys may be
        # missing afterwards.
        state_dict.pop('_fc.weight')
        state_dict.pop('_fc.bias')
        res = model.load_state_dict(state_dict, strict=False)
        assert set(res.missing_keys) == set(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
    print('Loaded pretrained weights for {}'.format(model_name))
458 | + | ||
459 | + | ||
460 | +## noising ## | ||
def stochastic_depth(inputs, is_training, stochastic_depth_rate=0.2):
    """Apply stochastic depth to a batch.

    Each sample is independently zeroed with probability
    ``stochastic_depth_rate``; survivors are scaled by 1/keep_prob so the
    expected value is unchanged. No-op at inference time.

    Args:
        inputs: (N, ...) activation tensor.
        is_training: apply the stochastic drop only when True.
        stochastic_depth_rate: probability of dropping each sample.

    Returns:
        Tensor of the same shape as ``inputs``.
    """
    if not is_training:
        return inputs

    # Compute keep_prob
    # TODO(tanmingxing): add support for training progress.
    keep_prob = 1.0 - stochastic_depth_rate

    # floor(keep_prob + U[0,1)) is a Bernoulli(keep_prob) draw per sample.
    # BUGFIX: allocate the random tensor on the inputs' device; it was always
    # created on the CPU before, which fails for CUDA inputs (drop_connect
    # already passes device=inputs.device).
    batch_size = inputs.shape[0]
    random_tensor = keep_prob + torch.rand(
        [batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    binary_tensor = torch.floor(random_tensor)
    output = torch.div(inputs, keep_prob) * binary_tensor
    return output
... | \ No newline at end of file | ... | \ No newline at end of file |
No preview for this file type
No preview for this file type
visualize/grad_cam.py
0 → 100644
1 | +from __future__ import print_function | ||
2 | +from model import mobilenetv3 | ||
3 | + | ||
4 | +import os | ||
5 | +import glob | ||
6 | +import copy | ||
7 | +import os.path as osp | ||
8 | +import click | ||
9 | +import cv2 | ||
10 | +import matplotlib.cm as cm | ||
11 | +import numpy as np | ||
12 | +import torch | ||
13 | +import torch.nn.functional as F | ||
14 | +from torchvision import models, transforms | ||
15 | + | ||
16 | +from visualize.grad_cam_utils import ( | ||
17 | + BackPropagation, | ||
18 | + Deconvnet, | ||
19 | + GradCAM, | ||
20 | + GuidedBackPropagation, | ||
21 | + occlusion_sensitivity, | ||
22 | +) | ||
23 | + | ||
24 | +# if a model includes LSTM, such as in image captioning, | ||
25 | +# torch.backends.cudnn.enabled = False | ||
26 | + | ||
27 | + | ||
def get_device(cuda):
    """Resolve the torch device, honoring the ``cuda`` flag only when CUDA is
    actually available, and print what was selected."""
    use_cuda = cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    if use_cuda:
        print("Device:", torch.cuda.get_device_name(torch.cuda.current_device()))
    else:
        print("Device: CPU")
    return device
37 | + | ||
def change_image_path(image_paths):
    """Expand each directory in ``image_paths`` into the full paths of every
    file found beneath it (recursively)."""
    collected = []
    for root_path in image_paths:
        base = os.path.normpath(root_path)
        for dirpath, _dirs, filenames in os.walk(base):
            # Paths are joined with '/' to match the rest of the pipeline.
            collected.extend(dirpath + "/" + name for name in filenames)
    return collected
48 | + | ||
def load_images(image_paths, normalize_factor):
    """Preprocess every image path.

    Args:
        image_paths: list of image file paths.
        normalize_factor: optional (mean, std) forwarded to preprocess().

    Returns:
        (images, raw_images, image_paths): normalized tensors, resized raw
        images, and the input paths unchanged.
    """
    images = []
    raw_images = []

    # Idiom fix: the original enumerate()d the list but never used the index.
    for image_path in image_paths:
        image, raw_image = preprocess(image_path, normalize_factor)
        images.append(image)
        raw_images.append(raw_image)
    return images, raw_images, image_paths
58 | + | ||
59 | + | ||
def get_classtable():
    """Return the ordered class names; list index = class id used by the model."""
    # The project uses a fixed binary label set; an earlier variant read the
    # names from samples/synset_words.txt instead.
    return ["Error", "Normal"]
70 | + | ||
71 | + | ||
def preprocess(image_path, normalize_factor):
    """Read one image as grayscale, resize to 128x128, and convert to a tensor.

    Args:
        image_path: path to the image file.
        normalize_factor: optional (mean, std) pair; when given, the tensor is
            normalized after ToTensor.

    Returns:
        (image, raw_image): the transformed tensor and the resized raw array.
    """
    if normalize_factor != None:
        mean = normalize_factor[0]
        std = normalize_factor[1]

    # Flag 0 -> grayscale: raw_image is a 2-D (H, W) array.
    raw_image = cv2.imread(image_path,0)
    raw_image = cv2.resize(raw_image, (128,) * 2)

    # NOTE(review): [..., ::-1] is the usual BGR->RGB channel swap, but on a
    # 2-D grayscale array it reverses the *columns*, i.e. the model input is
    # mirrored horizontally relative to raw_image — confirm this is intended.
    if normalize_factor != None:
        image = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(mean=mean, std=std),
            ]
        )(raw_image[..., ::-1].copy())
    else:
        image = transforms.Compose(
            [
                transforms.ToTensor(),
            ]
        )(raw_image[..., ::-1].copy())
    return image, raw_image
94 | + | ||
95 | + | ||
def save_gradient(filename, gradient):
    """Min-max normalize a (C, H, W) gradient tensor to [0, 255] and write it
    as an image file."""
    gradient = gradient.cpu().numpy().transpose(1, 2, 0)
    gradient -= gradient.min()
    # Guard against an all-constant gradient: after the shift above its max
    # is 0 and the original unconditional division produced NaNs.
    max_val = gradient.max()
    if max_val > 0:
        gradient /= max_val
    gradient *= 255.0
    cv2.imwrite(filename, np.uint8(gradient))
102 | + | ||
103 | + | ||
def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
    """Overlay a Grad-CAM heatmap on the raw image and write it to disk.

    Args:
        filename: output path.
        gcam: (H, W) activation map in [0, 1] (torch tensor).
        raw_image: (H, W, C) array blended with the colorized heatmap.
        paper_cmap: when True use the alpha blend from the Grad-CAM paper;
            otherwise average the heatmap and the image.
    """
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        gcam = (cmap.astype(float) + raw_image.astype(float)) / 2
    cv2.imwrite(filename, np.uint8(gcam))
113 | + | ||
114 | + | ||
def save_sensitivity(filename, maps):
    """Render an occlusion-sensitivity map with a blue/red colormap and save it.

    Scores are rescaled symmetrically around 0.5 so positive and negative
    sensitivities map to opposite ends of the bwr_r colormap.
    """
    maps = maps.cpu().numpy()
    # NOTE(review): assumes the map contains both positive and non-positive
    # entries; an all-positive or all-negative map leaves one of these boolean
    # slices empty, and .max()/.min() on it raises — confirm upstream
    # guarantees mixed signs.
    scale = max(maps[maps > 0].max(), -maps[maps <= 0].min())
    maps = maps / scale * 0.5
    maps += 0.5
    maps = cm.bwr_r(maps)[..., :3]
    maps = np.uint8(maps * 255.0)
    maps = cv2.resize(maps, (224, 224), interpolation=cv2.INTER_NEAREST)
    cv2.imwrite(filename, maps)
124 | + | ||
125 | + | ||
# Names of all model constructors exposed by torchvision.models: lowercase,
# non-dunder callables (e.g. 'resnet152'), sorted for stable CLI listings.
model_names = sorted(
    name
    for name in models.__dict__
    if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
132 | + | ||
133 | + | ||
@click.group()
@click.pass_context
def main(ctx):
    """CLI entry point: reports which sub-command is about to run."""
    print("Mode:", ctx.invoked_subcommand)
138 | + | ||
139 | + | ||
def make_grad_cam(image_paths, output_dir, model, normalize_factor, cam_class, args, target_layer="module.features.5", arch="MobilenetV3", topk=1, cuda=True):
    """
    Visualize model responses given multiple images.

    Runs vanilla backprop, Guided Backpropagation and Grad-CAM over every
    entry found directly under ``image_paths`` and writes the Grad-CAM
    overlays into ``output_dir``.

    Args:
        image_paths: directory containing the input images.
        output_dir: directory for the overlay PNGs (created if missing).
        model: classification model to explain.
        normalize_factor: optional (mean, std) forwarded to preprocess().
        cam_class: class-name filter (currently unused; the filter is disabled).
        args: config dict; args["task"] locates the cam sub-directory to skip.
        target_layer: layer whose activations feed Grad-CAM.
        arch: architecture tag embedded in the output filenames.
        topk: number of top-ranked predictions to visualize per image.
        cuda: run on GPU when available.
    """

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    image_paths = glob.glob(image_paths + "/*")
    if len(image_paths) == 0:
        print("There's no images in folder!")
        return
    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    model.to(device)
    model.eval()
    image_paths.sort()

    # BUGFIX: list.remove() raises ValueError when the item is absent; the
    # cam output directory only appears in the glob on re-runs, so drop it
    # conditionally instead of unconditionally.
    cam_dir = f'eval_results/{args["task"]}/error_case/cam'
    if cam_dir in image_paths:
        image_paths.remove(cam_dir)

    # Images
    images, raw_images, _ = load_images(image_paths, normalize_factor)
    images = torch.stack(images).to(device)

    """
    Common usage:
        1. Wrap your model with visualization classes defined in grad_cam.py
        2. Run forward() with images
        3. Run backward() with a list of specific classes
        4. Run generate() to export results
    """

    # =========================================================================
    # Vanilla Backpropagation: gradients are computed but not saved; this pass
    # also yields the sorted class ids reused by the passes below.
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images)  # classes sorted by descending probability

    for i in range(topk):
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()

    # Remove all the hook function in the "model"
    bp.remove_hook()

    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")

    gcam = GradCAM(model=model)
    _ = gcam.forward(images)

    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)

    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()

        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            # Write the Grad-CAM overlay for the i-th ranked class of image j.
            print(image_paths[j])
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        image_paths[j].split('/')[-1], arch, target_layer, classes[ids[j, i]]
                    ),
                ),
                gcam=regions[j, 0],
                raw_image=np.expand_dims(raw_images[j], axis=2),
            )
227 | + | ||
228 | + | ||
@main.command()
@click.option("-i", "--image-paths", type=str, multiple=True, required=True)
@click.option("-o", "--output-dir", type=str, default="./results")
@click.option("--cuda/--cpu", default=True)
def demo2(image_paths, output_dir, cuda):
    """
    Generate Grad-CAM at different layers of ResNet-152.
    """
    # Create the destination before any file is written (the original crashed
    # in save_gradcam when ./results did not exist).
    os.makedirs(output_dir, exist_ok=True)

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()

    # The stem ReLU plus the four residual stages
    target_layers = ["relu", "layer1", "layer2", "layer3", "layer4"]
    target_class = 243  # "bull mastif"

    # Images
    images, raw_images, _ = load_images(image_paths)
    images = torch.stack(images).to(device)

    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    # Backprop a fixed target class for every image in the batch
    ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)
    gcam.backward(ids=ids_)

    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))

        # Grad-CAM
        regions = gcam.generate(target_layer=target_layer)

        for j in range(len(images)):
            print(
                "\t#{}: {} ({:.5f})".format(
                    j, classes[target_class], float(probs[ids == target_class])
                )
            )

            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        j, "resnet152", target_layer, classes[target_class]
                    ),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
284 | + | ||
285 | + | ||
@main.command()
@click.option("-i", "--image-paths", type=str, multiple=True, required=True)
@click.option("-a", "--arch", type=click.Choice(model_names), required=True)
@click.option("-k", "--topk", type=int, default=3)
@click.option("-s", "--stride", type=int, default=1)
@click.option("-b", "--n-batches", type=int, default=128)
@click.option("-o", "--output-dir", type=str, default="./results")
@click.option("--cuda/--cpu", default=True)
def demo3(image_paths, arch, topk, stride, n_batches, output_dir, cuda):
    """
    Generate occlusion sensitivity maps.
    """
    # Create the destination before any file is written.
    os.makedirs(output_dir, exist_ok=True)

    device = get_device(cuda)

    # Synset words
    classes = get_classtable()

    # Model from torchvision
    model = models.__dict__[arch](pretrained=True)
    model = torch.nn.DataParallel(model)
    model.to(device)
    model.eval()

    # Images
    images, _, _ = load_images(image_paths)
    images = torch.stack(images).to(device)

    print("Occlusion Sensitivity:")

    patch_sizes = [10, 15, 25, 35, 45, 90]

    # Only the ranked class ids/probabilities are needed here — no gradients,
    # so skip autograd bookkeeping entirely.
    with torch.no_grad():
        logits = model(images)
        probs = F.softmax(logits, dim=1)
        probs, ids = probs.sort(dim=1, descending=True)

    for i in range(topk):
        for p in patch_sizes:
            print("Patch:", p)
            sensitivity = occlusion_sensitivity(
                model, images, ids[:, [i]], patch=p, stride=stride, n_batches=n_batches
            )

            # Save results as image files
            for j in range(len(images)):
                print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))

                save_sensitivity(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-sensitivity-{}-{}.png".format(
                            j, arch, p, classes[ids[j, i]]
                        ),
                    ),
                    maps=sensitivity[j],
                )
342 | + | ||
343 | + | ||
if __name__ == "__main__":
    # Dispatch to the click sub-commands (demo2, demo3).
    main()
visualize/grad_cam_utils.py
0 → 100644
1 | +#!/usr/bin/env python | ||
2 | +# coding: utf-8 | ||
3 | +# | ||
4 | +# Author: Kazuto Nakashima | ||
5 | +# URL: http://kazuto1011.github.io | ||
6 | +# Created: 2017-05-26 | ||
7 | + | ||
from collections.abc import Sequence

import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
15 | + | ||
16 | + | ||
17 | +class _BaseWrapper(object): | ||
18 | + def __init__(self, model): | ||
19 | + super(_BaseWrapper, self).__init__() | ||
20 | + self.device = next(model.parameters()).device | ||
21 | + self.model = model | ||
22 | + self.handlers = [] # a set of hook function handlers | ||
23 | + | ||
24 | + def _encode_one_hot(self, ids): | ||
25 | + one_hot = torch.zeros_like(self.logits).to(self.device) | ||
26 | + one_hot.scatter_(1, ids, 1.0) | ||
27 | + return one_hot | ||
28 | + | ||
29 | + def forward(self, image): | ||
30 | + self.image_shape = image.shape[2:] #채널 사이즈 | ||
31 | + self.logits = self.model(image) | ||
32 | + self.probs = F.softmax(self.logits, dim=1) | ||
33 | + return self.probs.sort(dim=1, descending=True) # ordered results | ||
34 | + | ||
35 | + def backward(self, ids): | ||
36 | + """ | ||
37 | + Class-specific backpropagation | ||
38 | + """ | ||
39 | + one_hot = self._encode_one_hot(ids) | ||
40 | + self.model.zero_grad() | ||
41 | + self.logits.backward(gradient=one_hot, retain_graph=True) | ||
42 | + | ||
43 | + def generate(self): | ||
44 | + raise NotImplementedError | ||
45 | + | ||
46 | + def remove_hook(self): | ||
47 | + """ | ||
48 | + Remove all the forward/backward hook functions | ||
49 | + """ | ||
50 | + for handle in self.handlers: | ||
51 | + handle.remove() | ||
52 | + | ||
53 | + | ||
class BackPropagation(_BaseWrapper):
    """Vanilla backpropagation: gradient of the class score w.r.t. the input image."""

    def forward(self, image):
        # Keep a handle on the input so its gradient can be read back in generate().
        self.image = image.requires_grad_()
        return super(BackPropagation, self).forward(self.image)

    def generate(self):
        """Return a copy of the input gradient and reset it for the next class."""
        saliency = self.image.grad.clone()
        self.image.grad.zero_()
        return saliency
63 | + | ||
64 | + | ||
class GuidedBackPropagation(BackPropagation):
    """
    "Striving for Simplicity: the All Convolutional Net"
    https://arxiv.org/pdf/1412.6806.pdf
    Look at Figure 1 on page 8.
    """

    def __init__(self, model):
        super(GuidedBackPropagation, self).__init__(model)

        def clamp_negative_grad(module, grad_in, grad_out):
            # Cut off negative gradients at every ReLU (other modules pass through).
            if isinstance(module, nn.ReLU):
                return (F.relu(grad_in[0]),)

        for _, submodule in self.model.named_modules():
            self.handlers.append(submodule.register_backward_hook(clamp_negative_grad))
82 | + | ||
83 | + | ||
class Deconvnet(BackPropagation):
    """
    "Striving for Simplicity: the All Convolutional Net"
    https://arxiv.org/pdf/1412.6806.pdf
    Look at Figure 1 on page 8.
    """

    def __init__(self, model):
        super(Deconvnet, self).__init__(model)

        def deconv_grad(module, grad_in, grad_out):
            # Propagate only positive output gradients, ignoring the ReLU's own mask.
            if isinstance(module, nn.ReLU):
                return (F.relu(grad_out[0]),)

        for _, submodule in self.model.named_modules():
            self.handlers.append(submodule.register_backward_hook(deconv_grad))
101 | + | ||
102 | + | ||
class GradCAM(_BaseWrapper):
    """
    "Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization"
    https://arxiv.org/pdf/1610.02391.pdf
    Look at Figure 2 on page 4
    """

    def __init__(self, model, candidate_layers=None):
        """Register forward/backward hooks that cache activations and gradients.

        Args:
            model: network to visualize.
            candidate_layers: optional list of module names to hook; when None,
                every module is hooked.
        """
        super(GradCAM, self).__init__(model)
        self.fmap_pool = {}   # layer name -> forward activation
        self.grad_pool = {}   # layer name -> gradient w.r.t. that activation
        self.candidate_layers = candidate_layers  # list of names, or None

        def save_fmaps(key):
            def forward_hook(module, input, output):
                self.fmap_pool[key] = output.detach()

            return forward_hook

        def save_grads(key):
            def backward_hook(module, grad_in, grad_out):
                self.grad_pool[key] = grad_out[0].detach()

            return backward_hook

        # If no candidates are specified, the hooks are registered on all layers.
        for name, module in self.model.named_modules():
            if self.candidate_layers is None or name in self.candidate_layers:
                self.handlers.append(module.register_forward_hook(save_fmaps(name)))
                self.handlers.append(module.register_backward_hook(save_grads(name)))

    def _find(self, pool, target_layer):
        """Look up a cached tensor by layer name; raise ValueError if never hooked."""
        if target_layer in pool.keys():
            return pool[target_layer]
        else:
            raise ValueError("Invalid layer name: {}".format(target_layer))

    def generate(self, target_layer):
        """Return the CAM for `target_layer`, upsampled to the input size and
        min-max normalized to [0, 1] per image."""
        fmaps = self._find(self.fmap_pool, target_layer)
        grads = self._find(self.grad_pool, target_layer)
        # Channel weights = global-average-pooled gradients (Grad-CAM eq. 1)
        weights = F.adaptive_avg_pool2d(grads, 1)

        gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)
        gcam = F.relu(gcam)
        gcam = F.interpolate(
            gcam, self.image_shape, mode="bilinear", align_corners=False
        )

        B, C, H, W = gcam.shape
        gcam = gcam.view(B, -1)
        gcam -= gcam.min(dim=1, keepdim=True)[0]
        # Clamp the denominator: when ReLU zeroes the whole map the original
        # `gcam /= gcam.max(...)` divided by zero and produced NaNs.
        gcam /= gcam.max(dim=1, keepdim=True)[0].clamp(min=torch.finfo(gcam.dtype).eps)
        gcam = gcam.view(B, C, H, W)

        return gcam
158 | + | ||
159 | + | ||
def occlusion_sensitivity(
    model, images, ids, mean=None, patch=35, stride=1, n_batches=128
):
    """
    "Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization"
    https://arxiv.org/pdf/1610.02391.pdf
    Look at Figure A5 on page 17

    Originally proposed in:
    "Visualizing and Understanding Convolutional Networks"
    https://arxiv.org/abs/1311.2901

    Slides an occluding patch of value `mean` over each (padded) image and
    returns, per image, the map of score differences (occluded - baseline)
    for the classes in `ids`.  Shape: (B, new_H, new_W).

    Fixes over the previous version:
    * the anchor loop incremented `grid_w` BEFORE recording it, so every
      occlusion window was shifted by `stride` and the last one fell off the
      image edge — anchors now start at 0;
    * `torch.set_grad_enabled(False)` leaked globally; a `no_grad` context
      restores the caller's autograd state;
    * `Sequence` is imported from `collections.abc` (removed from
      `collections` in Python 3.10).
    """
    from collections.abc import Sequence

    model.eval()
    mean = mean if mean else 0
    patch_H, patch_W = patch if isinstance(patch, Sequence) else (patch, patch)
    pad_H, pad_W = patch_H // 2, patch_W // 2

    # Progress bar is cosmetic; degrade gracefully when tqdm is unavailable.
    try:
        from tqdm import tqdm as _progress
    except ImportError:
        def _progress(iterable, **kwargs):
            return iterable

    with torch.no_grad():
        # Padded image
        images = F.pad(images, (pad_W, pad_W, pad_H, pad_H), value=mean)
        B, _, H, W = images.shape
        new_H = (H - patch_H) // stride + 1
        new_W = (W - patch_W) // stride + 1

        # Sampling grid: top-left corner of every occlusion window,
        # row-major so the final view(B, new_H, new_W) lines up.
        anchors = [
            (grid_h, grid_w)
            for grid_h in range(0, H - patch_H + 1, stride)
            for grid_w in range(0, W - patch_W + 1, stride)
        ]

        # Baseline score without occlusion
        baseline = model(images).detach().gather(1, ids)

        # Compute per-position logits in mini-batches of occluded copies
        scoremaps = []
        for i in _progress(range(0, len(anchors), n_batches), leave=False):
            batch_images = []
            batch_ids = []
            for grid_h, grid_w in anchors[i : i + n_batches]:
                images_ = images.clone()
                images_[..., grid_h : grid_h + patch_H, grid_w : grid_w + patch_W] = mean
                batch_images.append(images_)
                batch_ids.append(ids)
            batch_images = torch.cat(batch_images, dim=0)
            batch_ids = torch.cat(batch_ids, dim=0)
            scores = model(batch_images).detach().gather(1, batch_ids)
            scoremaps += list(torch.split(scores, B))

        diffmaps = torch.cat(scoremaps, dim=1) - baseline
        diffmaps = diffmaps.view(B, new_H, new_W)

    return diffmaps
보고서/계획서.docx
0 → 100644
No preview for this file type
-
Please register or login to post a comment